diff --git a/README.md b/README.md
index 250ced3..752c3d5 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,9 @@ conda create -n vllm-ltr python=3.10
 conda activate vllm-ltr
 git clone https://github.com/hao-ai-lab/vllm-ltr.git
 cd vllm-ltr
-pip install -e .
+conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 pytorch-cuda=12.1 -c pytorch -c nvidia  # install PyTorch according to your CUDA version
+pip install -e .  # install from source
+pip install flash-attn torchaudio==2.2.1 torchvision==0.17.1 numpy==1.25.2 fschat accelerate gcsfs scikit-learn scipy matplotlib evaluate  # extra dependencies
 ```
 
 ## Reproduce Results
diff --git a/benchmarks/README.md b/benchmarks/README.md
index a40d14c..08acc65 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -40,13 +40,15 @@ python benchmark_append_dataset_PO.py --dataset llama3-8b-sharegpt-test-t1-s0-81
 
 ## Download Pre-trained Models
 
-For model downloads, you can simply use:
+For predictor downloads, you can simply use:
 
 ```bash
 mkdir -p MODEL/results
 huggingface-cli download LLM-ltr/OPT-Predictors --local-dir MODEL/results
 ```
 
+or check the `vllm-ltr/train` directory to train the predictors yourself.
+
 ## Reproducing Results
 
 ### Reproduce Table 1
diff --git a/requirements-common.txt b/requirements-common.txt
index bb3177b..3de0f98 100644
--- a/requirements-common.txt
+++ b/requirements-common.txt
@@ -16,8 +16,3 @@ lm-format-enforcer == 0.9.3
 outlines == 0.0.34 # Requires torch >= 2.1.0
 typing_extensions
 filelock >= 3.10.4 # filelock starts to support `mode` argument from 3.10.4
-gcsfs
-scikit-learn
-scipy
-matplotlib
-evaluate
diff --git a/requirements-cuda.txt b/requirements-cuda.txt
index f45a321..ed05804 100644
--- a/requirements-cuda.txt
+++ b/requirements-cuda.txt
@@ -8,9 +8,4 @@
 vllm-nccl-cu12>=2.18,<2.19  # for downloading nccl library
 torch == 2.2.1
 xformers == 0.0.25  # Requires PyTorch 2.2.1
-torchaudio==2.2.1
-torchvision==0.17.1
-flash-attn
-numpy==1.25.2
-fschat
-accelerate
+
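Since the patch moves the version-pinned dependencies out of the requirements files and into the README install commands, a quick post-install sanity check helps confirm the pins resolved correctly. The snippet below is not part of the patch, just a sketch of one way to verify the environment after following the updated README.

```bash
# Not part of the patch: rough post-install sanity checks for the pinned versions.
python -c "import torch, torchvision, torchaudio, numpy; print(torch.__version__, torchvision.__version__, torchaudio.__version__, numpy.__version__)"

# flash-attn must be built against the installed torch; an import failure here
# usually means it was compiled for a different torch/CUDA combination.
python -c "import flash_attn; print(flash_attn.__version__)"

# Confirms the editable install of the forked vLLM succeeded.
python -c "import vllm; print(vllm.__version__)"
```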