Fix nightly spr & core perf test #899
Workflow file for this run
name: LLM Performance Test | |
# Cancel previous runs in the PR when you push new commits | |
concurrency: | |
group: ${{ github.workflow }}-llm-performance-tests-${{ github.event.pull_request.number || github.run_id }} | |
cancel-in-progress: true | |
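# Note: scheduled and manually dispatched runs have no pull_request number, so the concurrency
# group falls back to the unique run_id and nightly runs never cancel each other; only repeated
# pushes to the same PR share a group and get cancelled.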
permissions: | |
contents: read | |
# Controls when the action will run. | |
on: | |
schedule: | |
- cron: "30 16 * * *" # GMT time, 16:30 GMT == 00:30 China | |
# please uncomment it for PR tests | |
pull_request: | |
# branches: [main] | |
# paths: | |
# - ".github/workflows/llm_performance_tests.yml" | |
# - "python/llm/test/benchmark/**" | |
# - "python/llm/dev/benchmark/all-in-one/**" | |
workflow_dispatch: | |
inputs: | |
arc: | |
description: "If trigger performance test on Arc" | |
required: false | |
type: boolean | |
default: true | |
spr: | |
description: "If trigger performance test on SPR" | |
required: false | |
type: boolean | |
default: true | |
core: | |
description: "If trigger performance test on Core" | |
required: false | |
type: boolean | |
default: true | |
igpu: | |
description: "If trigger performance test on iGPU" | |
required: false | |
type: boolean | |
default: true | |
workflow_call: | |
# A workflow run is made up of one or more jobs that can run sequentially or in parallel | |
jobs: | |
# llm-cpp-build: # please uncomment it for PR tests | |
# uses: ./.github/workflows/llm-binary-build.yml | |
llm-performance-test-on-arc: | |
if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.arc ) || github.event.inputs.artifact == 'llm-performance-test-on-arc' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests | |
# needs: llm-cpp-build # please uncomment it for PR tests | |
strategy: | |
fail-fast: false | |
matrix: | |
python-version: ["3.11"] | |
runs-on: [self-hosted, llm, perf] | |
env: | |
OMP_NUM_THREADS: 16 | |
THREAD_NUM: 16 | |
ANALYTICS_ZOO_ROOT: ${{ github.workspace }} | |
CSV_SAVE_PATH: ${{ (github.event.schedule || github.event_name == 'workflow_dispatch') && '/mnt/disk1/nightly_perf_gpu/' || '/mnt/disk1/pr_perf_gpu/' }} | |
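# The expression above acts as a ternary: nightly (schedule) and manually dispatched runs write
# their CSVs to the nightly_perf_gpu folder, while PR-triggered runs write to pr_perf_gpu so they
# do not pollute the nightly history.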
steps: | |
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 | |
- name: Set up Python ${{ matrix.python-version }} | |
uses: actions/setup-python@v4 | |
with: | |
python-version: ${{ matrix.python-version }} | |
- name: Install dependencies | |
shell: bash | |
# pip install transformers_stream_generator for model internlm-chat-7b-8k | |
# pip install tiktoken for model Qwen-7B-Chat-10-12 | |
run: | | |
python -m pip install --upgrade pip | |
python -m pip install --upgrade wheel | |
python -m pip install --upgrade omegaconf | |
python -m pip install --upgrade pandas | |
python -m pip install --upgrade einops | |
python -m pip install --upgrade transformers_stream_generator | |
python -m pip install --upgrade tiktoken | |
# please uncomment this part and comment out the "Install IPEX-LLM from Pypi" part for PR tests
# - name: Download llm binary | |
# uses: ./.github/actions/llm/download-llm-binary | |
# - name: Run LLM install (all) test | |
# uses: ./.github/actions/llm/setup-llm-env | |
# with: | |
# extra-dependency: "xpu_2.1" | |
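# The version check in the step below assumes the nightly ipex-llm wheel embeds yesterday's date
# in its version string; if pip resolves an older or cached wheel, the job fails early instead of
# silently benchmarking a stale build.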
- name: Install IPEX-LLM from Pypi | |
shell: bash | |
run: | | |
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ | |
test_version_date=`date -d 'yesterday' '+%Y%m%d'` | |
if ! pip show ipex-llm | grep $test_version_date; then | |
echo "Did not install ipex-llm with excepted version $test_version_date" | |
exit 1 | |
fi | |
- name: Test installed xpu version | |
shell: bash | |
run: | | |
source /opt/intel/oneapi/setvars.sh | |
python -m pip install --upgrade pytest | |
python -m pip install --upgrade expecttest | |
bash python/llm/test/run-llm-install-tests.sh | |
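# The seds below drive the CSV naming scheme: run.py normally names its output
# {api}-results-{today}.csv, and each config/batch pass rewrites that pattern (test1/test2/test3,
# batch1/batch2/batch4) so the later concat and check steps can tell the runs apart.
# Replacing str(end - st) with "xxxxxx" masks the raw timing value, as the "hide time info"
# comments note.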
- name: Test on xpu(transformers==4.36.2) | |
shell: bash | |
run: | | |
date_for_test_version=$(date -d yesterday +%Y-%m-%d) | |
sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py | |
source /opt/intel/oneapi/setvars.sh | |
export USE_XETLA=OFF | |
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 | |
cp python/llm/test/benchmark/arc-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
mkdir test_batch1 | |
mkdir test_batch2 | |
mkdir test_batch4 | |
# batch_size 1 | |
# hide time info | |
sed -i 's/str(end - st)/"xxxxxx"/g' run.py | |
# change csv name | |
sed -i 's/{today}/{today}_test1_batch1/g' run.py | |
python run.py | |
mv *.csv test_batch1 | |
# batch_size 2 | |
cd ../../../../../ | |
cp python/llm/test/benchmark/arc-perf-test-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
# change csv name | |
sed -i 's/batch1/batch2/g' run.py | |
python run.py | |
mv *.csv test_batch2 | |
# batch_size 4 | |
cd ../../../../../ | |
cp python/llm/test/benchmark/arc-perf-test-batch4.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
# change csv name | |
sed -i 's/batch2/batch4/g' run.py | |
python run.py | |
mv *.csv test_batch4 | |
- name: Test on xpu(transformers==4.37.0) | |
shell: bash | |
run: | | |
source /opt/intel/oneapi/setvars.sh | |
export USE_XETLA=OFF | |
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 | |
# upgrade transformers for model Qwen/Qwen1.5-7B-Chat | |
python -m pip install transformers==4.37.0 | |
# batch_size 1 | |
cp python/llm/test/benchmark/arc-perf-transformers-437.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
# change csv name | |
sed -i 's/test1_batch4/test2_batch1/g' run.py | |
python run.py | |
mv *.csv test_batch1 | |
# batch_size 2 | |
cd ../../../../../ | |
cp python/llm/test/benchmark/arc-perf-transformers-437-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
# change csv name | |
sed -i 's/batch1/batch2/g' run.py | |
python run.py | |
mv *.csv test_batch2 | |
# batch_size 4 | |
cd ../../../../../ | |
cp python/llm/test/benchmark/arc-perf-transformers-437-batch4.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
# change csv name | |
sed -i 's/batch2/batch4/g' run.py | |
python run.py | |
mv *.csv test_batch4 | |
- name: Test on xpu(transformers==4.40.0) | |
shell: bash | |
run: | | |
source /opt/intel/oneapi/setvars.sh | |
export USE_XETLA=OFF | |
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 | |
# upgrade transformers for model Qwen/Qwen1.5-MoE-A2.7B-Chat | |
python -m pip install transformers==4.40.0 | |
python -m pip install trl | |
# batch_size 1 | |
cp python/llm/test/benchmark/arc-perf-transformers-440.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
# change csv name | |
sed -i 's/test2_batch4/test3_batch1/g' run.py | |
python run.py | |
mv *.csv test_batch1 | |
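# From here on the per-model CSVs produced above are post-processed: concat_csv.py merges them
# into one summary per batch size and csv_to_html.py renders that summary; pandas is pinned to
# 1.5.3, presumably for compatibility with these benchmark scripts.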
- name: Concat csv and generate html | |
shell: bash | |
run: | | |
# batch_size 1 | |
cd python/llm/dev/benchmark/all-in-one/test_batch1 | |
python ../../../../test/benchmark/concat_csv.py | |
for file in *.csv; do | |
if [[ $file != *test* ]]; then | |
cp "$file" $CSV_SAVE_PATH/batch_size_1 | |
fi | |
done | |
python -m pip install pandas==1.5.3 | |
cd ../../../../test/benchmark | |
python csv_to_html.py -f $CSV_SAVE_PATH/batch_size_1 | |
# batch_size 2 | |
cd ../../../../ | |
cd python/llm/dev/benchmark/all-in-one/test_batch2 | |
python ../../../../test/benchmark/concat_csv.py | |
for file in *.csv; do | |
if [[ $file != *test* ]]; then | |
cp "$file" $CSV_SAVE_PATH/batch_size_2 | |
fi | |
done | |
cd ../../../../test/benchmark | |
python csv_to_html.py -f $CSV_SAVE_PATH/batch_size_2 | |
# batch_size 4 | |
cd ../../../../ | |
cd python/llm/dev/benchmark/all-in-one/test_batch4 | |
python ../../../../test/benchmark/concat_csv.py | |
for file in *.csv; do | |
if [[ $file != *test* ]]; then | |
cp "$file" $CSV_SAVE_PATH/batch_size_4 | |
fi | |
done | |
cd ../../../../test/benchmark | |
python csv_to_html.py -f $CSV_SAVE_PATH/batch_size_4 | |
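# Next, the batch 1/2/4 summaries are combined into a single cross-batch table via
# merge_csv_batch.py; the intermediate per-batch files are deleted and only the merged result is
# published under $CSV_SAVE_PATH/merged.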
- name: Merge and sort csv files of multiple batches and generate html | |
shell: bash | |
run: | | |
cd python/llm/test/benchmark | |
mkdir merged_temp | |
# collect the result csv files from every batch folder into merged_temp
cd ../../dev/benchmark/all-in-one/test_batch1 | |
for file in *.csv; do | |
if [[ $file != *test* ]]; then | |
cp "$file" ../../../../test/benchmark/merged_temp | |
fi | |
done | |
cd ../test_batch2 | |
for file in *.csv; do | |
if [[ $file != *test* ]]; then | |
cp "$file" ../../../../test/benchmark/merged_temp | |
fi | |
done | |
cd ../test_batch4 | |
for file in *.csv; do | |
if [[ $file != *test* ]]; then | |
cp "$file" ../../../../test/benchmark/merged_temp | |
fi | |
done | |
cd ../../../../test/benchmark | |
python merge_csv_batch.py -f ./merged_temp | |
cd merged_temp | |
find . -name "*batch*.csv" -delete | |
for file in *.csv; do | |
cp "$file" $CSV_SAVE_PATH/merged | |
done | |
cd .. | |
python csv_to_html.py -f $CSV_SAVE_PATH/merged | |
rm -r merged_temp | |
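# update_html_in_parent_folder.py presumably refreshes the top-level index so the newly generated
# per-batch and merged reports are linked from the parent folder.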
- name: Update html in parent folder | |
shell: bash | |
run: | | |
cd python/llm/test/benchmark | |
python update_html_in_parent_folder.py -f $CSV_SAVE_PATH | |
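# check_results.py is run against each arc-perf yaml and, as its name suggests, should verify that
# every configured model actually produced a result row; CSVs are uploaded to FTP only for
# schedule and workflow_dispatch events, so PR runs never publish to the nightly location.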
- name: Check and upload results to ftp | |
shell: bash | |
run: | | |
# batch_size 1 | |
cd python/llm/dev/benchmark/all-in-one/test_batch1 | |
python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-test.yaml | |
python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437.yaml | |
python ../../../../test/benchmark/check_results.py -c test3 -y ../../../../test/benchmark/arc-perf-transformers-440.yaml | |
find . -name "*test*.csv" -delete | |
if [ ${{ github.event_name }} == "schedule" ] || [ ${{ github.event_name }} == "workflow_dispatch" ]; then | |
curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ | |
fi | |
cd ../ | |
rm -r test_batch1 | |
# batch_size 2 | |
cd test_batch2 | |
python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-test-batch2.yaml | |
python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437-batch2.yaml | |
find . -name "*test*.csv" -delete | |
if [ ${{ github.event_name }} == "schedule" ] || [ ${{ github.event_name }} == "workflow_dispatch" ]; then | |
curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ | |
fi | |
cd ../ | |
rm -r test_batch2 | |
# batch_size 4 | |
cd test_batch4 | |
python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-test-batch4.yaml | |
python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437-batch4.yaml | |
find . -name "*test*.csv" -delete | |
if [ ${{ github.event_name }} == "schedule" ] || [ ${{ github.event_name }} == "workflow_dispatch" ]; then | |
curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ | |
fi | |
cd ../ | |
rm -r test_batch4 | |
llm-performance-test-on-spr: | |
# if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.spr ) || github.event.inputs.artifact == 'llm-performance-test-on-spr' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests | |
# needs: llm-cpp-build # please uncomment it for PR tests | |
strategy: | |
fail-fast: false | |
matrix: | |
python-version: ["3.11"] | |
runs-on: [self-hosted, llm, spr-perf] | |
env: | |
OMP_NUM_THREADS: 16 | |
THREAD_NUM: 16 | |
ANALYTICS_ZOO_ROOT: ${{ github.workspace }} | |
steps: | |
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 | |
- name: Set up Python ${{ matrix.python-version }} | |
uses: actions/setup-python@v4 | |
with: | |
python-version: ${{ matrix.python-version }} | |
- name: Install dependencies | |
shell: bash | |
run: | | |
python -m pip install --upgrade pip | |
python -m pip install --upgrade wheel | |
python -m pip install --upgrade omegaconf | |
python -m pip install --upgrade pandas | |
python -m pip install --upgrade einops | |
python -m pip install --upgrade tiktoken | |
python -m pip install --upgrade transformers_stream_generator | |
# please uncomment this part and comment out the "Install IPEX-LLM from Pypi" part for PR tests
- name: Download llm binary | |
uses: ./.github/actions/llm/download-llm-binary | |
- name: Run LLM install (all) test | |
uses: ./.github/actions/llm/setup-llm-env | |
# - name: Install IPEX-LLM from Pypi | |
# shell: bash | |
# run: | | |
# pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu | |
# test_version_date=`date -d 'yesterday' '+%Y%m%d'` | |
# if ! pip show ipex-llm | grep $test_version_date; then | |
# echo "Did not install ipex-llm with excepted version $test_version_date" | |
# exit 1 | |
# fi | |
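# This SPR job currently uses the PR-test install path (CI-built binary + setup-llm-env) instead
# of the nightly PyPI wheel, matching the toggling instructions in the comments above.
# In the test step below, ipex-llm-init exports the recommended CPU performance environment and
# OMP_NUM_THREADS is raised to 48, presumably to match the core count of the SPR runner.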
- name: Test on cpu | |
shell: bash | |
run: | | |
date_for_test_version=$(date -d yesterday +%Y-%m-%d) | |
sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py | |
mv python/llm/test/benchmark/cpu-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
export http_proxy=${HTTP_PROXY} | |
export https_proxy=${HTTPS_PROXY} | |
source ipex-llm-init -t | |
export OMP_NUM_THREADS=48 | |
# hide time info | |
sed -i 's/str(end - st)/"xxxxxx"/g' run.py | |
python run.py | |
cp ./*.csv /mnt/disk1/models/nightly_perf_cpu | |
cd ../../../test/benchmark | |
python -m pip install pandas==1.5.3 | |
python csv_to_html.py -f /mnt/disk1/models/nightly_perf_cpu | |
cd /mnt/disk1/models/nightly_perf_cpu | |
for f in *.html; do | |
curl -T "$f" ${LLM_FTP_URL}/llm/nightly_perf/nightly_perf_cpu/ | |
done | |
llm-performance-test-on-core: | |
# if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.core ) || github.event.inputs.artifact == 'llm-performance-test-on-core' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests | |
# needs: llm-cpp-build # please uncomment it for PR tests | |
strategy: | |
fail-fast: false | |
matrix: | |
include: | |
- os: windows | |
platform: dp | |
python-version: "3.11" | |
# - os: windows | |
# platform: lp | |
# python-version: "3.11" | |
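# The platform value is only used as a self-hosted runner label below; with the lp entry
# commented out, the job currently targets just the dp Windows machine.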
runs-on: [self-hosted, "${{ matrix.os }}", llm, perf-core, "${{ matrix.platform }}"] | |
env: | |
ANALYTICS_ZOO_ROOT: ${{ github.workspace }} | |
CSV_SAVE_PATH: ${{ (github.event.schedule || github.event_name == 'workflow_dispatch') && 'D:/action-runners/nightly_perf_core_' || 'D:/action-runners/pr_perf_core_' }}${{ matrix.platform }}/ | |
steps: | |
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 | |
- name: Set up Python ${{ matrix.python-version }} | |
uses: actions/setup-python@v4 | |
with: | |
python-version: ${{ matrix.python-version }} | |
- name: Install dependencies | |
shell: bash | |
run: | | |
python -m pip install --upgrade pip | |
python -m pip install --upgrade wheel | |
python -m pip install --upgrade omegaconf pandas | |
python -m pip install --upgrade tiktoken einops transformers_stream_generator | |
# please uncomment this part and comment out the "Install IPEX-LLM from Pypi" part for PR tests
- name: Download llm binary | |
uses: ./.github/actions/llm/download-llm-binary | |
- name: Run LLM install (all) test | |
uses: ./.github/actions/llm/setup-llm-env | |
# - name: Install IPEX-LLM from Pypi | |
# shell: bash | |
# run: | | |
# pip install --pre --upgrade ipex-llm[all] | |
# test_version_date=`date -d 'yesterday' '+%Y%m%d'` | |
# if ! pip show ipex-llm | grep $test_version_date; then | |
# echo "Did not install ipex-llm with excepted version $test_version_date" | |
# exit 1 | |
# fi | |
- name: Test on core ${{ matrix.platform }} | |
shell: bash | |
run: | | |
date_for_test_version=$(date -d yesterday +%Y-%m-%d) | |
sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py | |
mv python/llm/test/benchmark/core-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml | |
cd python/llm/dev/benchmark/all-in-one | |
export http_proxy=${HTTP_PROXY} | |
export https_proxy=${HTTPS_PROXY} | |
# hide time info | |
sed -i 's/str(end - st)/"xxxxxx"/g' run.py | |
python run.py | |
cp ./*.csv $CSV_SAVE_PATH | |
cd ../../../test/benchmark | |
python -m pip install pandas==1.5.3 | |
python csv_to_html.py -f $CSV_SAVE_PATH | |
cd ../../dev/benchmark/all-in-one/ | |
if [ ${{ github.event_name }} == "schedule" ] || [ ${{ github.event_name }} == "workflow_dispatch" ]; then | |
curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/core_${{ matrix.platform }}/ | |
fi | |
llm-performance-test-on-igpu: | |
if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.igpu ) || github.event.inputs.artifact == 'llm-performance-test-on-igpu' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests | |
# needs: llm-cpp-build # please uncomment it for PR tests | |
strategy: | |
fail-fast: false | |
matrix: | |
include: | |
- os: windows | |
python-version: "3.11" | |
runs-on: [self-hosted, "${{ matrix.os }}", llm, perf-igpu] | |
env: | |
ANALYTICS_ZOO_ROOT: ${{ github.workspace }} | |
steps: | |
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 | |
# TODO: Put the ipex-llm related install process for win gpu into an action function
# Please uncomment it and comment out the install-from-pypi part for PR tests
# - name: Download llm binary | |
# uses: ./.github/actions/llm/download-llm-binary | |
# - name: Prepare for install ipex-llm from source | |
# shell: bash | |
# run: | | |
# sed -i 's/"bigdl-core-xe-21==" + CORE_XE_VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py | |
# sed -i 's/"bigdl-core-xe-batch-21==" + CORE_XE_VERSION/"bigdl-core-xe-batch-21"/g' python/llm/setup.py | |
# sed -i 's/"bigdl-core-xe-addons-21==" + CORE_XE_VERSION/"bigdl-core-xe-addons-21"/g' python/llm/setup.py | |
# sed -i 's/"bigdl-core-xe-esimd-21==" + CORE_XE_VERSION/"bigdl-core-xe-esimd-21"/g' python/llm/setup.py | |
# - name: Install ipex-llm and other related packages (install from source) | |
# shell: cmd | |
# run: | | |
# call conda create -n igpu-perf python=${{ matrix.python-version }} libuv -y | |
# call conda activate igpu-perf | |
# pip install --upgrade pip | |
# pip install --upgrade wheel | |
# pip install --upgrade omegaconf pandas | |
# pip install --upgrade tiktoken einops transformers_stream_generator | |
# cd python\llm | |
# python setup.py clean --all bdist_wheel --win | |
# if not exist dist\ipex_llm*.whl (exit /b 1) | |
# for %%i in (dist\ipex_llm*.whl) do set whl_name=%%i | |
# pip install --pre --upgrade %whl_name%[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ | |
# if %ERRORLEVEL% neq 0 (exit /b 1) | |
# pip list | |
# call conda deactivate | |
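# The date is computed in the bash step below and exported through $GITHUB_ENV so that the
# following cmd step can read it as %TEST_VERSION_DATE% when validating the installed wheel.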
- name: Determine desired ipex-llm version | |
shell: bash | |
run: | | |
test_version_date=`date -d 'yesterday' '+%Y%m%d'` | |
echo "TEST_VERSION_DATE=${test_version_date}" >> "$GITHUB_ENV" | |
- name: Install ipex-llm and other related packages (install from pypi) | |
shell: cmd | |
run: | | |
call conda create -n igpu-perf python=${{ matrix.python-version }} libuv -y | |
call conda activate igpu-perf | |
pip install --upgrade pip | |
pip install --upgrade wheel | |
pip install --upgrade omegaconf pandas | |
pip install --upgrade tiktoken einops transformers_stream_generator | |
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ | |
pip show ipex-llm | findstr %TEST_VERSION_DATE% | |
if %ERRORLEVEL% neq 0 ( | |
echo "Did not install ipex-llm with excepted version %TEST_VERSION_DATE%" | |
exit /b 1 | |
) | |
pip list | |
call conda deactivate | |
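# A separate html-gen conda env pins pandas 1.5.3 and numpy<2.0.0 for the csv/html scripts,
# presumably to avoid clashing with the newer packages in the igpu-perf benchmark env.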
- name: Create env for html generation | |
shell: cmd | |
run: | | |
call conda create -n html-gen python=3.11 -y | |
call conda activate html-gen | |
pip install pandas==1.5.3 | |
pip install Jinja2 | |
pip install "numpy<2.0.0" | |
call conda deactivate | |
- name: Set directory envs & fix generated csv date name
shell: bash | |
run: | | |
if [ ${{ github.event_name }} == "schedule" ] || [ ${{ github.event_name }} == "workflow_dispatch" ]; then | |
echo "CSV_SAVE_PATH=${CSV_NIGHTLY_PATH}" >> "$GITHUB_ENV" | |
else | |
echo "CSV_SAVE_PATH=${CSV_PR_PATH}" >> "$GITHUB_ENV" | |
fi | |
date_for_test_version=$(date -d yesterday +%Y-%m-%d) | |
echo "LOG_FILE=${date_for_test_version}_output.txt" >> "$GITHUB_ENV" | |
sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py | |
- name: Add extra warmup for chatglm3-6b int4+fp32 for more stable results | |
shell: bash | |
run: | | |
sed -i '/^\s*result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)/ i\ | |
if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat"]:\ | |
run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming) | |
' python/llm/dev/benchmark/all-in-one/run.py | |
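# The sed above injects an extra call to run_transformer_int4_gpu_win for chatglm3-6b and
# glm-4-9b-chat right before the measured call, giving those two models an additional warm-up
# pass, as the step name indicates.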
- name: Prepare igpu perf test (32-32) | |
shell: bash | |
run: | | |
# hide time info | |
# sed -i 's/str(end - st)/"xxxxxx"/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i 's/{api}-results-{today}.csv/32-32-{api}-results-{today}_test1.csv/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32.yaml | |
- name: Test on igpu (32-32) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
REM for llava | |
set TRANSFORMERS_OFFLINE=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\32-32.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\32-32\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
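# SYCL_CACHE_PERSISTENT=1 keeps compiled GPU kernels cached between runs,
# BIGDL_LLM_XMX_DISABLED=1 turns off the XMX code path for these iGPU runs, and
# TRANSFORMERS_OFFLINE=1 stops llava from trying to reach the Hugging Face Hub since models are
# read from the local model hub directory.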
- name: Prepare igpu perf test for transformers 4.37 (32-32) | |
shell: bash | |
run: | | |
sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_437.yaml | |
- name: Test on igpu for transformers 4.37 (32-32) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.37.0 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\32-32_437.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\32-32\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Concat csv and generate html (32-32) | |
shell: cmd | |
run: | | |
call conda activate html-gen | |
cd python\llm\dev\benchmark\all-in-one | |
python ..\..\..\test\benchmark\concat_csv.py | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
del /q *test*.csv | |
move *.csv %CSV_SAVE_PATH%\32-32\ | |
cd ..\..\..\test\benchmark | |
python csv_to_html.py -f %CSV_SAVE_PATH%\32-32\ | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
move %CSV_SAVE_PATH%\32-32\*.html %CSV_SAVE_PATH% | |
call conda deactivate | |
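# Each window size follows the same pattern: per-version CSVs are concatenated, the intermediate
# *test*.csv files are deleted, the merged CSV is archived under its window folder, and the
# generated HTML is moved up to %CSV_SAVE_PATH% so the final FTP upload step finds all reports in
# one place.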
# TODO: create an action function here for the different inputs
# 1024-128 | |
- name: Prepare igpu perf test (1024-128) | |
shell: bash | |
run: | | |
sed -i 's/32-32/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml | |
- name: Test on igpu (1024-128) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.36.2 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
REM for llava | |
set TRANSFORMERS_OFFLINE=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\1024-128.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Prepare igpu perf test for transformers 4.37 (1024-128) | |
shell: bash | |
run: | | |
sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_437.yaml | |
- name: Test on igpu for transformers 4.37 (1024-128) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.37.0 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\1024-128_437.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Concat csv and generate html (1024-128) | |
shell: cmd | |
run: | | |
call conda activate html-gen | |
cd python\llm\dev\benchmark\all-in-one | |
python ..\..\..\test\benchmark\concat_csv.py | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
del /q *test*.csv | |
move *.csv %CSV_SAVE_PATH%\1024-128\ | |
cd ..\..\..\test\benchmark | |
python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128\ | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
move %CSV_SAVE_PATH%\1024-128\*.html %CSV_SAVE_PATH% | |
call conda deactivate | |
# 2048-256 | |
- name: Prepare igpu perf test (2048-256) | |
shell: bash | |
run: | | |
sed -i 's/1024-128/2048-256/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256.yaml | |
- name: Test on igpu (2048-256) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.36.2 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
REM for llava | |
set TRANSFORMERS_OFFLINE=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\2048-256.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\2048-256\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Prepare igpu perf test for transformers 4.37 (2048-256) | |
shell: bash | |
run: | | |
sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_437.yaml | |
- name: Test on igpu for transformers 4.37 (2048-256) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.37.0 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\2048-256_437.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\2048-256\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Concat csv and generate html (2048-256) | |
shell: cmd | |
run: | | |
call conda activate html-gen | |
cd python\llm\dev\benchmark\all-in-one | |
python ..\..\..\test\benchmark\concat_csv.py | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
del /q *test*.csv | |
move *.csv %CSV_SAVE_PATH%\2048-256\ | |
cd ..\..\..\test\benchmark | |
python csv_to_html.py -f %CSV_SAVE_PATH%\2048-256\ | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
move %CSV_SAVE_PATH%\2048-256\*.html %CSV_SAVE_PATH% | |
call conda deactivate | |
# load_low_bit 1024-128 | |
- name: Prepare igpu perf test (load_low_bit 1024-128) | |
shell: bash | |
run: | | |
sed -i 's/2048-256/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit.yaml | |
- name: Test on igpu (load_low_bit 1024-128) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.36.2 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
REM for llava | |
set TRANSFORMERS_OFFLINE=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\1024-128_loadlowbit.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\1024-128_loadlowbit\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Prepare igpu perf test for transformers 4.37 (load_low_bit 1024-128) | |
shell: bash | |
run: | | |
sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit_437.yaml | |
- name: Test on igpu for transformers 4.37 (load_low_bit 1024-128) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.37.0 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\1024-128_loadlowbit_437.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\1024-128_loadlowbit\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Concat csv and generate html (load_low_bit 1024-128) | |
shell: cmd | |
run: | | |
call conda activate html-gen | |
cd python\llm\dev\benchmark\all-in-one | |
python ..\..\..\test\benchmark\concat_csv.py | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
del /q *test*.csv | |
move *.csv %CSV_SAVE_PATH%\1024-128_loadlowbit\ | |
cd ..\..\..\test\benchmark | |
python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128_loadlowbit\ | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
move %CSV_SAVE_PATH%\1024-128_loadlowbit\*.html %CSV_SAVE_PATH% | |
call conda deactivate | |
- name: Prepare igpu perf test (int4+fp16 1024-128) | |
shell: bash | |
run: | | |
sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml | |
- name: Test on igpu (int4+fp16 1024-128) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.36.2 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
REM for llava | |
set TRANSFORMERS_OFFLINE=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Prepare igpu perf test for transformers 4.37 (int4+fp16 1024-128) | |
shell: bash | |
run: | | |
sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py | |
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml | |
- name: Test on igpu for transformers 4.37 (int4+fp16 1024-128) | |
shell: cmd | |
run: | | |
call conda activate igpu-perf | |
pip install transformers==4.37.0 | |
set SYCL_CACHE_PERSISTENT=1 | |
set BIGDL_LLM_XMX_DISABLED=1 | |
cd python\llm\dev\benchmark\all-in-one | |
move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_437.yaml config.yaml | |
set PYTHONIOENCODING=utf-8 | |
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1 | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
call conda deactivate | |
- name: Concat csv and generate html (int4+fp16 1024-128) | |
shell: cmd | |
run: | | |
call conda activate html-gen | |
cd python\llm\dev\benchmark\all-in-one | |
python ..\..\..\test\benchmark\concat_csv.py | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
del /q *test*.csv | |
move *.csv %CSV_SAVE_PATH%\1024-128_int4_fp16\ | |
cd ..\..\..\test\benchmark | |
python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128_int4_fp16\ | |
if %ERRORLEVEL% neq 0 (exit /b 1) | |
move %CSV_SAVE_PATH%\1024-128_int4_fp16\*.html %CSV_SAVE_PATH% | |
call conda deactivate | |
# TODO: avoid duplicated code | |
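# Results are uploaded only for schedule and workflow_dispatch events, so PR runs never publish to
# the nightly FTP folder; "if: always()" ensures any HTML generated before a failing test step
# still gets uploaded.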
- name: Upload results to ftp | |
if: ${{ always() }} | |
shell: cmd | |
run: | | |
cd %CSV_SAVE_PATH% | |
IF "${{ github.event_name }}"=="schedule" ( | |
for %%f in (*.html) do ( | |
curl -T "%%f" %FTP_IGPU_NIGHTLY_PERF_PATH% | |
) | |
) | |
IF "${{ github.event_name }}"=="workflow_dispatch" ( | |
for %%f in (*.html) do ( | |
curl -T "%%f" %FTP_IGPU_NIGHTLY_PERF_PATH% | |
) | |
) | |
# for testing on the machine when encountering errors
# - name: Remove conda env | |
# if: ${{ always() }} | |
# shell: cmd | |
# run: | | |
# call conda env remove -n igpu-perf -y | |