From 330c699813212e4909fdf12495731a2212f8cccf Mon Sep 17 00:00:00 2001
From: krishung5
Date: Wed, 15 Jan 2025 12:34:12 -0800
Subject: [PATCH 1/4] L0_openai_trtllm improvement

---
 qa/L0_openai/test.sh | 45 ++++++++++++++++++++++++++++----------------
 1 file changed, 29 insertions(+), 16 deletions(-)

diff --git a/qa/L0_openai/test.sh b/qa/L0_openai/test.sh
index 2bff43fafe..4dce471d60 100755
--- a/qa/L0_openai/test.sh
+++ b/qa/L0_openai/test.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+# Copyright 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -53,22 +53,35 @@ function prepare_tensorrtllm() {
     # FIXME: Remove when testing TRT-LLM containers built from source
     pip install -r requirements.txt
 
-    MODEL="llama-3-8b-instruct"
+    MODEL="meta-llama/Meta-Llama-3.1-8B-Instruct"
     MODEL_REPO="tests/tensorrtllm_models"
-    rm -rf ${MODEL_REPO}
-
-    # FIXME: This may require an upgrade each release to match the TRT-LLM version,
-    # and would likely be easier to use trtllm-build directly for test purposes.
-    # Use Triton CLI to prepare model repository for testing
-    pip install git+https://github.com/triton-inference-server/triton_cli.git@0.1.1
-    # NOTE: Could use ENGINE_DEST_PATH set to NFS mount for pre-built engines in future
-    triton import \
-        --model ${MODEL} \
-        --backend tensorrtllm \
-        --model-repository "${MODEL_REPO}"
-
-    # WAR for tests expecting default name of "tensorrt_llm_bls"
-    mv "${MODEL_REPO}/${MODEL}" "${MODEL_REPO}/tensorrt_llm_bls"
+    mkdir -p ${MODEL_REPO}
+    cp /app/all_models/inflight_batcher_llm/* "${MODEL_REPO}" -r
+
+    # 1. Download model from HF
+    huggingface-cli download ${MODEL}
+
+    HF_LLAMA_MODEL=`python3 -c "from pathlib import Path; from huggingface_hub import hf_hub_download; print(Path(hf_hub_download('${MODEL}', filename='config.json')).parent)"`
+    CKPT_PATH=/tmp/ckpt/llama/3.1-8b-instruct/
+    ENGINE_PATH=/tmp/engines/llama/3.1-8b-instruct/
+
+    # 2. Convert weights
+    python3 /app/examples/llama/convert_checkpoint.py --model_dir ${HF_LLAMA_MODEL} \
+        --output_dir ${CKPT_PATH} \
+        --dtype float16
+
+    # 3. Build engine
+    trtllm-build --checkpoint_dir ${CKPT_PATH} \
+        --gemm_plugin auto \
+        --output_dir ${ENGINE_PATH}
+
+    # 4. Prepare model repository
+    FILL_TEMPLATE="/app/tools/fill_template.py"
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/preprocessing/config.pbtxt tokenizer_dir:${HF_LLAMA_MODEL},triton_max_batch_size:64,preprocessing_instance_count:1
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/postprocessing/config.pbtxt tokenizer_dir:${HF_LLAMA_MODEL},triton_max_batch_size:64,postprocessing_instance_count:1
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm_bls/config.pbtxt triton_max_batch_size:64,decoupled_mode:False,bls_instance_count:1,accumulate_tokens:False,logits_datatype:TYPE_FP32
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/ensemble/config.pbtxt triton_max_batch_size:64,logits_datatype:TYPE_FP32
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm/config.pbtxt triton_backend:tensorrtllm,triton_max_batch_size:64,decoupled_mode:False,max_beam_width:1,engine_dir:${ENGINE_PATH},batching_strategy:inflight_fused_batching,max_queue_delay_microseconds:0,encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32
 }
 
 function pre_test() {

From a4b94be9d8778ff2fee54c29cf7ad462be94fbba Mon Sep 17 00:00:00 2001
From: krishung5
Date: Fri, 17 Jan 2025 00:16:54 -0800
Subject: [PATCH 2/4] Update parameters. Remove ensemble model.

---
 qa/L0_openai/test.sh | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/qa/L0_openai/test.sh b/qa/L0_openai/test.sh
index 4dce471d60..f22874bd35 100755
--- a/qa/L0_openai/test.sh
+++ b/qa/L0_openai/test.sh
@@ -57,6 +57,8 @@ function prepare_tensorrtllm() {
     MODEL_REPO="tests/tensorrtllm_models"
     mkdir -p ${MODEL_REPO}
     cp /app/all_models/inflight_batcher_llm/* "${MODEL_REPO}" -r
+    # Ensemble model is not needed for the test
+    rm -rf ${MODEL_REPO}/ensemble
 
     # 1. Download model from HF
     huggingface-cli download ${MODEL}
@@ -77,11 +79,11 @@ function prepare_tensorrtllm() {
 
     # 4. Prepare model repository
     FILL_TEMPLATE="/app/tools/fill_template.py"
-    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/preprocessing/config.pbtxt tokenizer_dir:${HF_LLAMA_MODEL},triton_max_batch_size:64,preprocessing_instance_count:1
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/preprocessing/config.pbtxt tokenizer_dir:${HF_LLAMA_MODEL},triton_max_batch_size:64,preprocessing_instance_count:1,max_queue_size:0
     python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/postprocessing/config.pbtxt tokenizer_dir:${HF_LLAMA_MODEL},triton_max_batch_size:64,postprocessing_instance_count:1
-    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm_bls/config.pbtxt triton_max_batch_size:64,decoupled_mode:False,bls_instance_count:1,accumulate_tokens:False,logits_datatype:TYPE_FP32
-    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/ensemble/config.pbtxt triton_max_batch_size:64,logits_datatype:TYPE_FP32
-    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm/config.pbtxt triton_backend:tensorrtllm,triton_max_batch_size:64,decoupled_mode:False,max_beam_width:1,engine_dir:${ENGINE_PATH},batching_strategy:inflight_fused_batching,max_queue_delay_microseconds:0,encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm_bls/config.pbtxt triton_max_batch_size:64,decoupled_mode:True,bls_instance_count:1,accumulate_tokens:False,logits_datatype:TYPE_FP32
+    # python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/ensemble/config.pbtxt triton_max_batch_size:64,logits_datatype:TYPE_FP32
+    python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm/config.pbtxt triton_backend:tensorrtllm,triton_max_batch_size:64,decoupled_mode:True,max_beam_width:1,engine_dir:${ENGINE_PATH},batching_strategy:inflight_fused_batching,max_queue_size:0,max_queue_delay_microseconds:1000,encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32,exclude_input_in_output:True
 }
 
 function pre_test() {

From 199266ec8cb1e522a079a46f63f84153b1241a93 Mon Sep 17 00:00:00 2001
From: krishung5
Date: Fri, 17 Jan 2025 00:22:28 -0800
Subject: [PATCH 3/4] Remove unused comment

---
 qa/L0_openai/test.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/qa/L0_openai/test.sh b/qa/L0_openai/test.sh
index f22874bd35..4a65a84d2a 100755
--- a/qa/L0_openai/test.sh
+++ b/qa/L0_openai/test.sh
@@ -82,7 +82,6 @@ function prepare_tensorrtllm() {
     python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/preprocessing/config.pbtxt tokenizer_dir:${HF_LLAMA_MODEL},triton_max_batch_size:64,preprocessing_instance_count:1,max_queue_size:0
     python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/postprocessing/config.pbtxt tokenizer_dir:${HF_LLAMA_MODEL},triton_max_batch_size:64,postprocessing_instance_count:1
     python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm_bls/config.pbtxt triton_max_batch_size:64,decoupled_mode:True,bls_instance_count:1,accumulate_tokens:False,logits_datatype:TYPE_FP32
-    # python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/ensemble/config.pbtxt triton_max_batch_size:64,logits_datatype:TYPE_FP32
     python3 ${FILL_TEMPLATE} -i ${MODEL_REPO}/tensorrt_llm/config.pbtxt triton_backend:tensorrtllm,triton_max_batch_size:64,decoupled_mode:True,max_beam_width:1,engine_dir:${ENGINE_PATH},batching_strategy:inflight_fused_batching,max_queue_size:0,max_queue_delay_microseconds:1000,encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32,exclude_input_in_output:True
 }
 

From fd078fd43f3be10b015e92c2d16f4ea8fed8ea85 Mon Sep 17 00:00:00 2001
From: krishung5
Date: Fri, 17 Jan 2025 14:39:52 -0800
Subject: [PATCH 4/4] Set max batch size to 128

---
 qa/L0_openai/test.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/qa/L0_openai/test.sh b/qa/L0_openai/test.sh
index 4a65a84d2a..0921bce98e 100755
--- a/qa/L0_openai/test.sh
+++ b/qa/L0_openai/test.sh
@@ -73,8 +73,10 @@ function prepare_tensorrtllm() {
         --dtype float16
 
     # 3. Build engine
+    # max_batch_size set to 128 to avoid OOM errors
    trtllm-build --checkpoint_dir ${CKPT_PATH} \
         --gemm_plugin auto \
+        --max_batch_size 128 \
         --output_dir ${ENGINE_PATH}
 
     # 4. Prepare model repository
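
Note: the patches above only build the engine and fill in the config templates; the L0_openai suite then drives the repository through the OpenAI frontend tests. As a rough illustration of how the prepared repository could be smoke-tested on its own, here is a minimal bash sketch. It assumes tritonserver is on PATH and that Triton's default HTTP port 8000 is used; neither the launch command nor the port comes from the patches, so treat this as a hypothetical check rather than part of the test.

    # Hypothetical smoke test for the generated repository (illustration only, not part of the patches).
    MODEL_REPO="tests/tensorrtllm_models"

    # Launch Triton against the filled-in TRT-LLM model repository.
    tritonserver --model-repository "${MODEL_REPO}" > server.log 2>&1 &
    SERVER_PID=$!

    # Poll the standard KServe readiness endpoint until the models finish loading.
    for _ in $(seq 1 60); do
        curl -sf localhost:8000/v2/health/ready && break
        sleep 5
    done

    # The repository is configured for decoupled (streaming) mode, so real requests
    # go through the OpenAI frontend in the test suite; here we only verify startup.
    kill ${SERVER_PID}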