CI: Switch Qwen to its 72B version #3
name: Test with Phi-3 Mini
on: [push, pull_request]
jobs:

  zero-shot:
    runs-on: ubuntu-22.04
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '18'
      - name: Prepare LLM (Phi-3 Mini)
        uses: ./.github/actions/prepare-llm
        timeout-minutes: 3
      - run: echo 'Which planet in our solar system is the largest?' | ./query-llm.js | tee output.txt
        env:
          LLM_API_BASE_URL: 'http://127.0.0.1:8080/v1'
          LLM_ZERO_SHOT: 1   # zero-shot mode: no chain-of-thought prompting
      - run: cat output.txt
      - run: grep -i jupiter output.txt   # fail the job if the answer does not mention Jupiter

  chain-of-thought:
    runs-on: ubuntu-22.04
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '18'
      - name: Prepare LLM (Phi-3 Mini)
        uses: ./.github/actions/prepare-llm
        timeout-minutes: 3
      - run: echo 'Which planet in our solar system is the largest?' | ./query-llm.js | tee output.txt
        env:
          LLM_API_BASE_URL: 'http://127.0.0.1:8080/v1'
      - run: cat output.txt
      - run: grep -i jupiter output.txt

  multi-turn:
    needs: chain-of-thought   # start only after the chain-of-thought job passes
    runs-on: ubuntu-22.04
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '18'
      - name: Prepare LLM (Phi-3 Mini)
        uses: ./.github/actions/prepare-llm
        timeout-minutes: 3
      - run: ./query-llm.js tests/canary-multi-turn.txt
        env:
          LLM_API_BASE_URL: 'http://127.0.0.1:8080/v1'
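
All three jobs rely on a local composite action at ./.github/actions/prepare-llm to bring up an OpenAI-compatible endpoint on 127.0.0.1:8080 before query-llm.js runs. That action is not part of this change; the sketch below is only one possible implementation, assuming the runner already has a llama.cpp llama-server binary on its PATH and using a placeholder MODEL_URL rather than a real Phi-3 Mini GGUF download link.

# .github/actions/prepare-llm/action.yml (illustrative sketch, not the actual action)
name: Prepare LLM
description: Download a small GGUF model and serve it via an OpenAI-compatible API on port 8080
runs:
  using: composite
  steps:
    - name: Download model
      shell: bash
      run: |
        # MODEL_URL is a placeholder; point it at the actual Phi-3 Mini GGUF file
        curl -fsSL -o /tmp/model.gguf "$MODEL_URL"
      env:
        MODEL_URL: https://example.com/phi-3-mini-4k-instruct-q4.gguf
    - name: Start inference server
      shell: bash
      run: |
        # Assumes llama.cpp's llama-server is available on the runner; it exposes an
        # OpenAI-compatible API (including /v1/chat/completions) on the given port.
        nohup llama-server -m /tmp/model.gguf --port 8080 > /tmp/llama-server.log 2>&1 &
        # Block until the server reports healthy so later steps can query it right away
        timeout 120 bash -c 'until curl -sf http://127.0.0.1:8080/health; do sleep 2; done'

Keeping the model download and server startup in one shared composite action avoids duplicating that setup across the zero-shot, chain-of-thought, and multi-turn jobs.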