-
Notifications
You must be signed in to change notification settings - Fork 0
155 lines (136 loc) · 5.48 KB
/
run-evaluation-leviathan.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
---
name: Run evaluation on Leviathan

# NOTE: generic YAML 1.1 parsers read the bare `on` key as boolean true;
# GitHub's workflow loader handles it correctly, so it is left as-is
# (suppress yamllint `truthy` for this key if linting).
on:
  schedule:
    # 1am PT (8:00 UTC — 1am PDT; during standard time this is midnight PST).
    - cron: '0 8 * * *'
  # Allow manual runs from the Actions tab.
  workflow_dispatch:

env:
  # Configure the number of tasks to run in parallel. Please see the
  # documentation in [run-evaluation.sh](../../run-evaluation.sh) for more
  # details.
  NUM_JOBS_VIVADO_TASKS: 50
  NUM_JOBS_LAKEROAD_TASKS: 20
  NUM_JOBS_OTHER_TASKS: 100
  # Interval at which to print uptime. See run-evaluation.sh script.
  PRINT_UPTIME_INTERVAL: 60
  # Number of jobs when building something with make. This is a separate task
  # from the above as each job generally uses one thread.
  MAKE_JOBS: 128
jobs:
  # TODO(@gussmith23) It would be nice if this wasn't necessary
  cleaner:
    runs-on: [self-hosted, leviathan]
    steps:
      - name: Clean up previous runs
        run: rm -rf "${{ github.workspace }}"

  build-docker-image:
    # Make sure we clean first.
    needs: cleaner
    runs-on: [self-hosted, leviathan]
    outputs:
      # Docker image tag produced by the generate-tag step below; consumed by
      # the run-evaluation job.
      tag: ${{ steps.generate-tag.outputs.tag }}
    steps:
      # NOTE(review): checkout@v3 / upload-artifact@v3 are older majors;
      # consider upgrading to v4 — confirm the self-hosted runner supports it.
      - uses: actions/checkout@v3
      - name: Checkout specific submodules
        run: |
          git submodule init
          git submodule update lakeroad
          cd lakeroad
          git submodule init
          git submodule update lakeroad-private/
      - name: Zip up artifact for Zenodo
        run: |
          mkdir lakeroad-evaluation
          rsync -r . --exclude lakeroad-evaluation/ lakeroad-evaluation/
          zip -9 -r lakeroad-evaluation.zip lakeroad-evaluation/
      - name: Upload Zenodo artifact
        uses: actions/upload-artifact@v3
        with:
          name: lakeroad-evaluation
          path: lakeroad-evaluation.zip
      - name: Clean up intermediate files
        run: rm -rf lakeroad-evaluation lakeroad-evaluation.zip
      - name: Generate Docker image tag
        id: generate-tag
        # Tag with a Unix timestamp so each run builds a uniquely-named image.
        run: |
          export TAG=lakeroad-evaluation:$(date +%s)
          echo "Docker image tag: $TAG"
          echo "tag=$TAG" >> $GITHUB_OUTPUT
      - name: Build Docker image
        run: |
          docker build \
            . \
            --file Dockerfile \
            --tag ${{ steps.generate-tag.outputs.tag }} \
            --build-arg VIVADO_BIN_DIR=/tools/Xilinx/Vivado/2023.1/bin \
            --build-arg QUARTUS_BIN_DIR=/tools/intel/quartus/bin/ \
            --build-arg DIAMOND_BINDIR=/usr/local/diamond/3.12/bin/lin64 \
            --build-arg MAKE_JOBS=${{ env.MAKE_JOBS }}

  run-evaluation:
    # TODO(@gussmith23): Figure out what's taking so long. (probs Verilator)
    timeout-minutes: 480
    needs: build-docker-image
    runs-on: [self-hosted, leviathan]
    outputs:
      # Name of the Docker container the evaluation ran in; downstream jobs
      # use it to `docker cp` results out.
      container-name: ${{ steps.run-evaluation.outputs.container-name }}
    steps:
      - id: run-evaluation
        # NOTE: -n argument for doit controls # simultaneous processes. I have to
        # set it manually -- if it's too high, too many Vivados running at once
        # will cause out-of-memory errors.
        run: |
          CONTAINER_NAME=lakeroad-evaluation-run-$(date +%s)
          echo "Docker container name: $CONTAINER_NAME"
          echo "container-name=$CONTAINER_NAME" >> $GITHUB_OUTPUT
          docker run \
            --name $CONTAINER_NAME \
            -v /tools/Xilinx:/tools/Xilinx \
            -v /usr/local/diamond:/usr/local/diamond \
            -v /tools/intel:/tools/intel \
            --env LOGLEVEL=INFO \
            --env LRE_OUTPUT_DIR=/root/results \
            --env NUM_JOBS_VIVADO_TASKS=${{ env.NUM_JOBS_VIVADO_TASKS }} \
            --env NUM_JOBS_LAKEROAD_TASKS=${{ env.NUM_JOBS_LAKEROAD_TASKS }} \
            --env NUM_JOBS_OTHER_TASKS=${{ env.NUM_JOBS_OTHER_TASKS }} \
            --env PRINT_UPTIME_INTERVAL=${{ env.PRINT_UPTIME_INTERVAL }} \
            ${{ needs.build-docker-image.outputs.tag }} \
            bash /root/run-evaluation.sh

  # send-results-to-slack:
  #   needs: run-evaluation
  #   runs-on: [self-hosted, leviathan]
  #   steps:
  #     - run: docker cp ${{ needs.run-evaluation.outputs.container-name }}:/root/results results/
  #     - run: |
  #         # Send initial message to start the thread.
  #         THREAD_TS=$(curl -F "channel=${{ secrets.LAKEROAD_CHANNEL_ID }}" \
  #           -F "token=${{ secrets.SLACK_OAUTH_TOKEN }}" \
  #           -F "text=Results from CI run" \
  #           -X POST https://slack.com/api/chat.postMessage \
  #           | jq .ts)
  #         curl -F "channels=${{ secrets.LAKEROAD_CHANNEL_ID }}" \
  #           -F "token=${{ secrets.SLACK_OAUTH_TOKEN }}" \
  #           -F "file=@results/figures/sofa_figure.png" \
  #           -F "thread_ts=$THREAD_TS" \
  #           -X POST https://slack.com/api/files.upload
  #         curl -F "channels=${{ secrets.LAKEROAD_CHANNEL_ID }}" \
  #           -F "token=${{ secrets.SLACK_OAUTH_TOKEN }}" \
  #           -F "file=@results/figures/lattice_ecp5_figure.png" \
  #           -F "thread_ts=$THREAD_TS" \
  #           -X POST https://slack.com/api/files.upload
  #         curl -F "channels=${{ secrets.LAKEROAD_CHANNEL_ID }}" \
  #           -F "token=${{ secrets.SLACK_OAUTH_TOKEN }}" \
  #           -F "file=@results/figures/xilinx_ultrascale_plus_figure.png" \
  #           -F "thread_ts=$THREAD_TS" \
  #           -X POST https://slack.com/api/files.upload

  upload-artifact:
    # Runs even if evaluation failed. Lets us inspect partial results and logs.
    if: always()
    needs: run-evaluation
    runs-on: [self-hosted, leviathan]
    steps:
      - run: docker cp ${{ needs.run-evaluation.outputs.container-name }}:/root/results results/
      - run: zip -r results.zip results/
      - uses: actions/upload-artifact@v3
        with:
          name: evaluation-results
          path: results.zip