Add llm-ppl workflow #1

name: LLM Perplexity Evaluation

# Cancel previous runs in the PR when you push new commits
concurrency:
  group: ${{ github.workflow }}-llm-nightly-test-${{ github.event.pull_request.number || github.run_id }}
  cancel-in-progress: true
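# For example, a push to pull request #1 resolves the group to
# "LLM Perplexity Evaluation-llm-nightly-test-1", so a newer push cancels the run still in progress.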

# Controls when the action will run.
on:
  schedule:
    - cron: "00 13 * * 1,2,3,4,5" # GMT time, 13:00 GMT == 21:00 China
  pull_request:
    branches: [main]
    paths:
      - ".github/workflows/llm-ppl-evaluation.yml"
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:
    inputs:
      model_name:
        description: 'Model names, separated by commas; each must be quoted.'
        required: true
        type: string
      precision:
        description: 'Precisions, separated by commas; each must be quoted.'
        required: true
        type: string
      dataset:
        description: 'Datasets, separated by commas; each must be quoted.'
        required: true
        type: string
      runs-on:
        description: 'Labels to filter the runners, separated by commas; each must be quoted.'
        default: "accuracy"
        required: false
        type: string
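    # Example manual dispatch values; each item carries its own quotes because the
    # inputs are spliced into JSON arrays via format('[ {0} ]', ...) in set-manual-env below:
    #   model_name: "Llama-2-7b-chat-hf","Mistral-7B-v0.1"
    #   precision:  "fp8","mixed_fp4"
    #   dataset:    "wikitext"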

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml

  set-matrix:
    runs-on: ubuntu-latest
    outputs:
      model_name: ${{ steps.set-matrix.outputs.model_name }}
      precision: ${{ steps.set-matrix.outputs.precision }}
      dataset: ${{ steps.set-matrix.outputs.dataset }}
      runner: ${{ steps.set-matrix.outputs.runner }}
    steps:
      - name: set-nightly-env
        if: ${{ github.event_name == 'schedule' }}
        env:
          NIGHTLY_MATRIX_MODEL_NAME: '["stablelm-3b-4e1t","Mistral-7B-v0.1"]'
          NIGHTLY_MATRIX_DATASET: '["wikitext"]'
          NIGHTLY_MATRIX_PRECISION: '["mixed_fp4", "fp8"]'
          NIGHTLY_LABELS: '["self-hosted", "llm", "accuracy"]'
        run: |
          echo "model_name=$NIGHTLY_MATRIX_MODEL_NAME" >> $GITHUB_ENV
          echo "precision=$NIGHTLY_MATRIX_PRECISION" >> $GITHUB_ENV
          echo "dataset=$NIGHTLY_MATRIX_DATASET" >> $GITHUB_ENV
          echo "runner=$NIGHTLY_LABELS" >> $GITHUB_ENV
      - name: set-pr-env
        if: ${{ github.event_name == 'pull_request' }}
        env:
          PR_MATRIX_MODEL_NAME: '["Llama-2-7b-chat-hf"]'
          PR_MATRIX_DATASET: '["wikitext"]'
          PR_MATRIX_PRECISION: '["mixed_fp4", "fp8"]'
          PR_LABELS: '["self-hosted", "llm", "temp-arc20"]'
        run: |
          echo "model_name=$PR_MATRIX_MODEL_NAME" >> $GITHUB_ENV
          echo "precision=$PR_MATRIX_PRECISION" >> $GITHUB_ENV
          echo "dataset=$PR_MATRIX_DATASET" >> $GITHUB_ENV
          echo "runner=$PR_LABELS" >> $GITHUB_ENV
      - name: set-manual-env
        if: ${{ github.event_name == 'workflow_dispatch' }}
        env:
          MANUAL_MATRIX_MODEL_NAME: ${{ format('[ {0} ]', inputs.model_name) }}
          MANUAL_MATRIX_DATASET: ${{ format('[ {0} ]', inputs.dataset) }}
          MANUAL_MATRIX_PRECISION: ${{ format('[ {0} ]', inputs.precision) }}
          MANUAL_LABELS: ${{ format('["self-hosted", "llm", {0}]', inputs.runs-on) }}
        run: |
          echo "model_name=$MANUAL_MATRIX_MODEL_NAME" >> $GITHUB_ENV
          echo "precision=$MANUAL_MATRIX_PRECISION" >> $GITHUB_ENV
          echo "dataset=$MANUAL_MATRIX_DATASET" >> $GITHUB_ENV
          echo "runner=$MANUAL_LABELS" >> $GITHUB_ENV
      - name: set-matrix
        id: set-matrix
        run: |
          echo "model_name=$model_name" >> $GITHUB_OUTPUT
          echo "precision=$precision" >> $GITHUB_OUTPUT
          echo "dataset=$dataset" >> $GITHUB_OUTPUT
          echo "runner=$runner" >> $GITHUB_OUTPUT

  llm-harness-evaluation:
    timeout-minutes: 1000
    needs: [llm-cpp-build, set-matrix]
    strategy:
      fail-fast: false
      matrix:
        # include:
        #   - python-version: "3.9"
        #     model_name: "stablelm-3b-4e1t"
        #     task: "arc"
        #     precision: "sym_int4" # options: sym_int4, fp4, mixed_fp4, sym_int8, fp8, mixed_fp8
        python-version: ["3.9"]
        model_name: ${{ fromJson(needs.set-matrix.outputs.model_name) }}
        dataset: ${{ fromJson(needs.set-matrix.outputs.dataset) }}
        precision: ${{ fromJson(needs.set-matrix.outputs.precision) }}
        device: [xpu]
    runs-on: ${{ fromJson(needs.set-matrix.outputs.runner) }}
    env:
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
      ORIGIN_DIR: /mnt/disk1/models
      DATASET_DIR: /home/arda/kai/non-pip/ppl-eval
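      # Assumption: ORIGIN_DIR and DATASET_DIR point at storage pre-provisioned on the
      # self-hosted runner; models are fetched into ORIGIN_DIR on first use (see the
      # "Download models and datasets" step below).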
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade setuptools==58.0.4
          python -m pip install --upgrade wheel
      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary
      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env
        with:
          extra-dependency: "xpu_2.1"
      - name: Download models and datasets
        shell: bash
        run: |
          echo "MODEL_PATH=${ORIGIN_DIR}/${{ matrix.model_name }}/" >> "$GITHUB_ENV"
          echo "DATASET_PATH=${DATASET_DIR}/${{ matrix.dataset }}" >> "$GITHUB_ENV"
          wget -r -nH -nc --no-verbose --cut-dirs=1 ${LLM_FTP_URL}/llm/${{ matrix.model_name }} -P ${ORIGIN_DIR}/
      - name: Upgrade packages
        shell: bash
        run: |
          pip install --upgrade transformers==4.34.0 datasets==2.14.6
      - name: Run perplexity
        shell: bash
        working-directory: ${{ github.workspace }}/python/llm/dev/benchmark/perplexity
        env:
          USE_XETLA: OFF
          # SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS: 1
        run: |
          source /opt/intel/oneapi/setvars.sh
          python run.py \
            --model_path ${MODEL_PATH} \
            --precisions ${{ matrix.precision }} \
            --device ${{ matrix.device }} \
            --dataset path=${DATASET_PATH}
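          # For a pull_request run, one matrix job expands to roughly:
          #   python run.py --model_path /mnt/disk1/models/Llama-2-7b-chat-hf/ \
          #     --precisions mixed_fp4 --device xpu \
          #     --dataset path=/home/arda/kai/non-pip/ppl-eval/wikitext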