#!/usr/bin/env bash
set -euo pipefail

CONDA_ROOT="/mnt/hwfile/zhangfan.p/miniconda3"
ENV_PATH="$CONDA_ROOT/envs/llamafac"
export PATH="$ENV_PATH/bin:$PATH"
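# Optional sanity check (a sketch; assumes the env above actually contains
# vllm -- the conda root and env name are site-specific):
# python -c 'import vllm' || { echo "vllm not importable from $ENV_PATH" >&2; exit 1; }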
| read -p "Enter config (0 for fine-tuned, 1 for base): " CONFIG | |
| export CONFIG=${CONFIG:-0} | |
if [ "$CONFIG" -eq 0 ]; then
    export CUDA_VISIBLE_DEVICES=1
    export MODEL_NAME="ea-dev/eval-agent-vbench-base-table"
    export PORT=12333
    export GPU_MEMORY_UTILIZATION=0.7
else
    export CUDA_VISIBLE_DEVICES=2
    export MODEL_NAME="qwen/Qwen2.5-3B-Instruct"
    export PORT=12334
    export GPU_MEMORY_UTILIZATION=0.7
fi
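
# Non-interactive usage (illustrative; assumes this file is saved as serve.sh --
# the filename is hypothetical):
#   printf '0\n' | bash serve.sh    # fine-tuned model
#   printf '1\n' | bash serve.sh    # base model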

# Launch the vLLM OpenAI-compatible API server
echo "Starting eval agent server on 0.0.0.0:${PORT}..."
echo "Model: ${MODEL_NAME}"
echo "GPU Memory Utilization: ${GPU_MEMORY_UTILIZATION}"
exec python -u -m vllm.entrypoints.openai.api_server \
    --model "${MODEL_NAME}" \
    --host 0.0.0.0 \
    --port "${PORT}" \
    --gpu-memory-utilization "${GPU_MEMORY_UTILIZATION}" \
    --trust-remote-code \
    --max-model-len 16384 \
    --served-model-name eval-agent
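
# Smoke test once the server is up (a sketch; vLLM's OpenAI-compatible server
# exposes /v1/chat/completions, and the model is served under the alias
# "eval-agent" set above):
#   curl "http://localhost:${PORT}/v1/chat/completions" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "eval-agent",
#          "messages": [{"role": "user", "content": "ping"}],
#          "max_tokens": 16}'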