Commit 4874faf

[WIP] Parallel GH actions workflow for Nixpkgs eval
Partly taken from #352808 and #269403
1 parent f8d0c64 commit 4874faf

3 files changed, +175 -0 lines changed


.github/workflows/eval.yml

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
name: Eval

on: pull_request_target

permissions:
  contents: read

jobs:
  tests:
    name: eval-check
    runs-on: ubuntu-latest
    strategy:
      matrix:
        system: [x86_64-linux, aarch64-linux, aarch64-darwin, x86_64-darwin]
    steps:
      # Important: Because of `pull_request_target`, this doesn't check out the PR,
      # but rather the base branch of the PR, which is needed so we don't run untrusted code
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          path: base
          sparse-checkout: ci
      - name: Resolving the merge commit
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          if mergedSha=$(base/ci/get-merge-commit.sh ${{ github.repository }} ${{ github.event.number }}); then
            echo "Checking the merge commit $mergedSha"
            echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
          else
            # Skipping so that no notifications are sent
            echo "Skipping the rest..."
          fi
          rm -rf base
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        # Add this to _all_ subsequent steps to skip them
        if: env.mergedSha
        with:
          ref: ${{ env.mergedSha }}

      - uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
        if: env.mergedSha

      - name: Enable swap
        if: env.mergedSha
        run: |
          sudo fallocate -l 10G /swapfile
          sudo chmod 600 /swapfile
          sudo mkswap /swapfile
          sudo swapon /swapfile

      - name: Check eval
        if: env.mergedSha
        run: ./ci/eval-nixpkgs.sh --system "${{ matrix.system }}"
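
The workflow depends on ci/get-merge-commit.sh, which is not part of this commit: on success it must print the SHA of the PR's test merge commit (captured into mergedSha), and on failure every later `if: env.mergedSha` step is skipped. A minimal sketch of what such a resolver could look like, assuming it uses the gh CLI against the GitHub REST API; the variable names and exact checks are illustrative, not the actual script:

#!/usr/bin/env bash
# Hypothetical resolver sketch -- not the real ci/get-merge-commit.sh.
# Expects GH_TOKEN in the environment (the workflow sets it from github.token).
set -euo pipefail

repo=$1      # e.g. "NixOS/nixpkgs", passed as ${{ github.repository }}
prNumber=$2  # passed as ${{ github.event.number }}

# Ask the GitHub API whether the PR is mergeable; merge_commit_sha is only
# meaningful once GitHub has computed the test merge.
pr=$(gh api "repos/$repo/pulls/$prNumber")

if [[ "$(jq -r .mergeable <<< "$pr")" == "true" ]]; then
  jq -r .merge_commit_sha <<< "$pr"
else
  # A non-zero exit makes the workflow skip all remaining steps.
  exit 1
fi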

ci/eval-nixpkgs.sh

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p moreutils -I nixpkgs=channel:nixpkgs-unstable

set -euxo pipefail

system="x86_64-linux"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
NIXPKGS_PATH="$(readlink -f "$SCRIPT_DIR"/..)"

parseArgs() {
  while [[ $# -gt 0 ]]; do
    case $1 in
      --system)
        system=$2
        shift 2
        ;;
      *)
        echo "Unknown argument: $1"
        exit 1
        ;;
    esac
  done
}

main() {
  parseArgs "$@"
  tmpdir=$(mktemp -d)
  trap 'rm -rf "$tmpdir"' EXIT

  nix-instantiate --eval --strict --json --arg enableWarnings false "$NIXPKGS_PATH"/pkgs/top-level/release-attrpaths-superset.nix -A paths > "$tmpdir/paths.json"

  CORES=$(nproc)
  # Originally @amjoseph: note that the number of processes spawned is four times
  # the number of cores -- this helps in two ways:
  # 1. Keeping cores busy while I/O operations are in flight
  # 2. Since the amount of time needed for the jobs is *not* balanced,
  #    this minimizes the "tail latency" for the very last job to finish
  #    (on one core) by making the job size smaller.
  NUM_CHUNKS=$(( 4 * CORES ))

  (
    set +e
    parallel -j "$CORES" \
      nix-env -qaP --no-name --out-path --arg checkMeta true --arg includeBroken true \
      --arg systems "[\"$system\"]" \
      -f "$NIXPKGS_PATH"/ci/parallel.nix --arg attrPathFile "$tmpdir"/paths.json \
      --arg numChunks "$NUM_CHUNKS" --show-trace --arg myChunk \
      -- $(seq 0 $(( NUM_CHUNKS - 1 ))) > "$tmpdir/paths"
    echo $? > "$tmpdir/exit-code"
  ) &
  pid=$!
  while kill -0 "$pid"; do
    free -g >&2
    sleep 20
  done
  jq --raw-input --slurp 'split("\n") | map(select(. != "") | split(" ") | map(select(. != "")) | { key: .[0], value: .[1] }) | from_entries' "$tmpdir/paths"
  exit "$(cat "$tmpdir/exit-code")"
}

main "$@"
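
Because the final jq step turns the "attrpath outpath" lines from nix-env into a single JSON object, the script can also be run locally. A small usage sketch, assuming a Nixpkgs checkout that contains this ci/ directory and a working Nix installation (the file name outpaths.json is arbitrary):

# Evaluate all packages for one system; the attribute-path -> out-path map
# is printed on stdout, while the periodic memory statistics go to stderr.
./ci/eval-nixpkgs.sh --system x86_64-linux > outpaths.json

# Quick sanity check: number of attribute paths that evaluated.
jq 'length' outpaths.json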

ci/parallel.nix

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
/*
  Invocation: see ci/eval-nixpkgs.sh; note that the number of processes
  spawned is four times the number of cores -- this helps in two ways:

  1. Keeping cores busy while I/O operations are in flight

  2. Since the amount of time needed for the jobs is *not* balanced,
     this minimizes the "tail latency" for the very last job to finish
     (on one core) by making the job size smaller.
*/
# see pkgs/top-level/nohydra
{ lib ? import ../lib
, checkMeta
, includeBroken ? true
, path ? ./..
, systems
, myChunk
, numChunks
, attrPathFile
}:

let
  attrPaths = builtins.fromJSON (builtins.readFile attrPathFile);
  chunkSize = (lib.length attrPaths) / numChunks;
  myPaths =
    let
      dropped = lib.drop (chunkSize * myChunk) attrPaths;
    in
    if myChunk == numChunks - 1
    then dropped
    else lib.take chunkSize dropped;

  unfiltered = import ../pkgs/top-level/release-outpaths.nix {
    inherit checkMeta path includeBroken systems;
  };

  f = i: m: a:
    lib.mapAttrs (name: values:
      if a ? ${name} then
        if lib.any (value: lib.length value <= i + 1) values then
          a.${name}
        else
          f (i + 1) values a.${name}
      else
        null
    ) (lib.groupBy (a: lib.elemAt a i) m);

  filtered = f 0 myPaths unfiltered;

  recurseEverywhere = val:
    if lib.isDerivation val || !(lib.isAttrs val)
    then val
    else (builtins.mapAttrs (_: v: recurseEverywhere v) val)
      // { recurseForDerivations = true; };

in
recurseEverywhere filtered
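
For debugging, a single chunk can be evaluated by hand with the same nix-env call that eval-nixpkgs.sh runs under parallel. A sketch for chunk 0 of 16 (both numbers chosen arbitrarily here), assuming paths.json was produced beforehand by the nix-instantiate step in that script and that the command is run from the repository root:

nix-env -qaP --no-name --out-path \
  --arg checkMeta true --arg includeBroken true \
  --arg systems '["x86_64-linux"]' \
  -f ./ci/parallel.nix \
  --arg attrPathFile "$PWD/paths.json" \
  --arg numChunks 16 --arg myChunk 0 --show-trace

The last chunk (myChunk == numChunks - 1) picks up the remainder when the number of attribute paths does not divide evenly, matching the drop/take logic above.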
