-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMultiplePerformance.sh
More file actions
223 lines (186 loc) · 9.96 KB
/
MultiplePerformance.sh
File metadata and controls
223 lines (186 loc) · 9.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
#!/bin/bash
# Slurm batch script: benchmark a 4-node vLLM DeepSeek v3.1 deployment.
# Starts docker containers and a Ray cluster on the four nodes below, then
# (sweep currently commented out in the main flow) launches the model server
# and runs performance tests at several request rates.
#SBATCH --job-name=vllm_deepseek
#SBATCH --nodes=4
#SBATCH --nodelist=dgpu-srv002,dgpu-srv003,dgpu-srv004,dgpu-srv005
#SBATCH --exclusive
#SBATCH --time=14:00:00
start_docker() {
  # Bring up the vLLM container on every allocated node in one srun fan-out
  # (one task per node); the container boot script lives on shared NFS.
  printf '%s\n' "[INFO](start_docker) Starting docker containers on all nodes"
  srun --ntasks=4 --ntasks-per-node=1 --gres=none --mem=1G --overlap \
    bash /nfs_share/disk2/QLF/vLLMContainer.sh
}
start_ray_cluster() {
# Restart the Ray cluster: force-stop any stale Ray daemons on all 4 nodes,
# start the head on srv004, join the three workers, then list cluster members.
echo "[INFO](start_ray_cluster) Starting ray cluster"
# (superseded per-node variants of the fan-out stop below, kept for reference)
# srun --nodelist=dgpu-srv002 --gres=none --mem=1G --overlap docker exec vllm_deepseek_v3.1_boot ray stop -f
# srun --nodelist=dgpu-srv004 --gres=none --mem=1G --overlap docker exec vllm_deepseek_v3.1_boot ray stop -f
# srun --nodelist=dgpu-srv003 --gres=none --mem=1G --overlap docker exec vllm_deepseek_v3.1_boot ray stop -f
# srun --nodelist=dgpu-srv005 --gres=none --mem=1G --overlap docker exec vllm_deepseek_v3.1_boot ray stop -f
# Stop any leftover Ray processes on every node (one task per node).
srun --ntasks=4 --ntasks-per-node=1 --gres=none --mem=1G --overlap \
docker exec vllm_deepseek_v3.1_boot ray stop -f
# Start the Ray head on srv004. The sourced env script presumably sets the Ray
# address/ports used by ray_start_1.sh — TODO confirm its contents.
srun --nodes=1 --nodelist=dgpu-srv004 --gres=none --mem=1G --overlap \
docker exec vllm_deepseek_v3.1_boot bash -c "source \
/nfs_share/disk2/QLF/srv004/ds_v3.1_srv004_env_1.sh \
&& bash /nfs_share/disk2/QLF/srv004/ray_start_1.sh"
# Join the three worker nodes to the cluster.
# NOTE(review): these three srun calls lack --overlap while every other srun
# in this file uses it — confirm that is intentional and not an oversight.
srun --nodes=1 --nodelist=dgpu-srv002 --gres=none --mem=1G docker exec vllm_deepseek_v3.1_boot bash -c "source /nfs_share/disk2/QLF/srv002/ds_v3.1_srv002_env_1.sh && bash /nfs_share/disk2/QLF/srv002/ray_start_1.sh"
srun --nodes=1 --nodelist=dgpu-srv003 --gres=none --mem=1G docker exec vllm_deepseek_v3.1_boot bash -c "source /nfs_share/disk2/QLF/srv003/ds_v3.1_srv003_env_1.sh && bash /nfs_share/disk2/QLF/srv003/ray_start_1.sh"
srun --nodes=1 --nodelist=dgpu-srv005 --gres=none --mem=1G docker exec vllm_deepseek_v3.1_boot bash -c "source /nfs_share/disk2/QLF/srv005/ds_v3.1_srv005_env_1.sh && bash /nfs_share/disk2/QLF/srv005/ray_start_1.sh"
# Sanity check: print cluster membership as seen from the head node.
srun --nodes=1 --nodelist=dgpu-srv004 --gres=none --mem=1G docker exec vllm_deepseek_v3.1_boot ray list nodes
}
start_model_server() {
  # Launch the vLLM API server on srv004 in the background and poll its log
  # until it reports startup completion, an apparent error, or timeout.
  # Args:
  #   $1 - value for --max-num-seqs
  #   $2 - value for --max-num-batched-tokens
  #   $3 - directory where the server log file is written
  # Returns 0 once "Application startup complete." appears in the log,
  # 1 on a detected startup error or after the timeout elapses.
  local max_num_seqs=$1
  local max_num_batched_tokens=$2
  local test_result_sub_dir=$3
  local log_file="${test_result_sub_dir}/server_max_num_seqs${max_num_seqs}_max_num_batched_tokens${max_num_batched_tokens}_$$.log"
  echo "[INFO](start_model_server) Starting model server on srv004"
  srun --nodes=1 --nodelist=dgpu-srv004 --mem=500G docker exec vllm_deepseek_v3.1_boot \
    bash /nfs_share/disk2/QLF/srv004/ds_v3.1_srv004_1.sh \
    --max-num-seqs "$max_num_seqs" --max-num-batched-tokens "$max_num_batched_tokens" &> \
    "${log_file}" &
  # Wait for the server to come up fully.
  echo "[INFO](start_model_server) Waiting for server to start up..."
  local wait_time=0
  local max_wait=600      # give up after 10 minutes
  # BUG FIX: interval was 300s, so the first health check happened only after
  # 5 minutes and at most two checks fit inside the 600s timeout — a server
  # that started (or crashed) early went unnoticed for minutes. Poll every
  # 10s instead; the overall timeout is unchanged.
  local check_interval=10
  while [ "$wait_time" -lt "$max_wait" ]; do
    sleep "$check_interval"
    wait_time=$((wait_time + check_interval))
    # The log file only exists once srun has launched and produced output.
    if [ -f "${log_file}" ]; then
      # Startup-complete marker emitted by the server when ready.
      if grep -q "Application startup complete." "${log_file}"; then
        echo "[INFO](start_model_server) Server startup complete! (waited ${wait_time}s)"
        return 0
      fi
      # NOTE(review): this pattern may false-positive on benign log lines
      # that merely contain "ERROR"/"Exception" — confirm against real logs.
      if grep -q "ERROR\|CRITICAL\|Exception\|Traceback" "${log_file}"; then
        echo "[ERROR](start_model_server) Server startup failed. Check log file for details:"
        echo " ${log_file}"
        return 1
      fi
    fi
    echo "[INFO](start_model_server) Still waiting for server to start... (${wait_time}s / ${max_wait}s)"
  done
  echo "[ERROR](start_model_server) Server startup timeout after ${max_wait} seconds"
  return 1
}
run_performance_test() {
  # Run one benchmark pass against the running server, retrying (up to 5
  # attempts) on srun errors or incomplete results.
  # Args:
  #   $1 - max-num-seqs the server was started with (reused on restart)
  #   $2 - request rate for the benchmark script
  #   $3 - max-num-batched-tokens the server was started with (reused on restart)
  #   $4 - directory where the per-rate log file is written
  # Returns 0 when the benchmark reports exactly 7000 successful requests,
  # 1 after exhausting all attempts or failing to restart the server.
  local max_num_seqs=$1
  local request_rate=$2
  local max_num_batched_tokens=$3
  local test_result_sub_dir=$4
  local log_file="${test_result_sub_dir}/request_rate${request_rate}.log"
  # Retry mechanism - up to 5 attempts
  local max_attempts=5
  local attempt=1
  # BUG FIX: was an implicit global; keep it function-local.
  local successful_requests
  while [ "$attempt" -le "$max_attempts" ]; do
    echo "[INFO](run_performance_test) Running performance benchmark,number of attempts: $attempt"
    # Run the performance test, teeing output to the per-rate log file.
    srun --nodes=1 --nodelist=dgpu-srv004 --gres=none --mem=1G --overlap \
      docker exec vllm_deepseek_v3.1_boot \
      bash /nfs_share/disk2/QLF/srv004/ds_v3.1_performance_1.sh \
      --request-rate "$request_rate" 2>&1 | tee "${log_file}"
    # An srun error means the server/cluster is unhealthy: restart both
    # before retrying.
    if grep -q "srun: error" "${log_file}"; then
      # BUG FIX: inner quotes were unescaped in this and the two messages
      # below, silently splitting the string at parse time.
      echo "[WARNING](run_performance_test) The \"srun error\" was detected in the log."
      attempt=$((attempt + 1))
      echo "[INFO](run_performance_test) Retrying after 1 second..."
      sleep 1
      stop_model_server
      start_ray_cluster
      if ! start_model_server "$max_num_seqs" "$max_num_batched_tokens" "$test_result_sub_dir"; then
        echo "[ERROR](run_performance_test) Failed to restart model server"
        return 1
      fi
      continue
    fi
    # Extract the "Successful requests" count from the benchmark summary.
    successful_requests=$(grep "Successful requests:" "${log_file}" | awk '{print $3}')
    # The run only counts when all 7000 requests succeeded.
    if [ -z "$successful_requests" ]; then
      echo "[WARNING](run_performance_test) No \"Successful requests\" information was found."
      attempt=$((attempt + 1))
      echo "[INFO](run_performance_test) Retrying after 1 second..."
      sleep 1
      continue
    elif [ "$successful_requests" -ne 7000 ]; then
      echo "[WARNING](run_performance_test) The number of \"Successful requests\" is ${successful_requests},which is not equal to 7000."
      attempt=$((attempt + 1))
      echo "[INFO](run_performance_test) Retrying after 1 second..."
      sleep 1
      continue
    fi
    # Reaching here means the benchmark succeeded.
    echo "[INFO](run_performance_test) Benchmark was successfully completed."
    return 0
  done
  # All attempts exhausted. BUG FIX: tag was "[Error]" (inconsistent with the
  # rest of the file) and the count was hard-coded as "five".
  echo "[ERROR](run_performance_test) Failed after ${max_attempts} attempts."
  return 1
}
# Stopping the server means killing its process inside the container.
stop_model_server() {
  # Terminate the vLLM OpenAI API server process on srv004; '|| true' keeps
  # this a no-op when no matching process exists (pkill exits non-zero).
  printf '%s\n' "[INFO](stop_model_server) Stopping model server"
  srun --nodelist=dgpu-srv004 --gres=none --mem=1G --overlap \
    docker exec vllm_deepseek_v3.1_boot \
    pkill -f "vllm.entrypoints.openai.api_server" || true
}
stop_ray_cluster() {
  # Force-stop the Ray daemons on all four nodes in one fan-out
  # (one task per node).
  printf '%s\n' "[INFO](stop_ray_cluster) Stopping ray cluster"
  srun --ntasks=4 --ntasks-per-node=1 --gres=none --mem=1G --overlap \
    docker exec vllm_deepseek_v3.1_boot ray stop -f
}
# Main flow: start containers and the Ray cluster, then (currently disabled)
# run the parameter-sweep benchmark, and finally tear the Ray cluster down.
echo "[INFO] Starting vLLM cluster performance testing"
start_docker
start_ray_cluster
# Create the results directory
# timestamp=$(date +%Y%m%d_%H%M%S)
# test_result_dir="/nfs_share/disk2/QLF/TestResults${timestamp}/"
# mkdir -p $test_result_dir
# # Define the test-parameter arrays
# MAX_NUM_SEQS_VALUES=(256 384 512)
# MAX_NUM_BATCHED_TOKENS_VALUES=(2048 4096 6144)
# REQUEST_RATE_VALUES=(4 8 16 32 40)
# # Outer loop: iterate over max-num-seqs
# for max_num_seqs in "${MAX_NUM_SEQS_VALUES[@]}"; do
# for max_num_batched_tokens in "${MAX_NUM_BATCHED_TOKENS_VALUES[@]}"; do
# # Start the model server
# test_result_sub_dir="${test_result_dir}/max_num_seqs${max_num_seqs}_max_num_batched_tokens${max_num_batched_tokens}"
# mkdir -p $test_result_sub_dir
# start_ray_cluster
# start_model_server $max_num_seqs $max_num_batched_tokens $test_result_sub_dir
# if [ $? -ne 0 ]; then
# echo "[ERROR] Exiting the program..."
# exit 1
# fi
# # max_concurrency=$(echo "scale=0; $max_num_seqs * 1.2 / 1" | bc)
# # Inner loop: iterate over request-rate
# for request_rate in "${REQUEST_RATE_VALUES[@]}"; do
# # Run the performance test
# # run_performance_test $max_num_seqs $request_rate $max_concurrency $test_result_sub_dir
# run_performance_test $max_num_seqs $request_rate $max_num_batched_tokens $test_result_sub_dir
# if [ $? -ne 0 ]; then
# echo "[ERROR] Exiting the program..."
# exit 1
# fi
# done
# # Stop the current model server
# stop_model_server
# echo "[INFO] Completed all tests for max-num-seqs=$max_num_seqs & max_num_batched_tokens=$max_num_batched_tokens"
# echo "[INFO] Results saved in: $test_result_sub_dir"
# done
# done
# echo "[INFO] All operations completed"
# stop_model_server
stop_ray_cluster