Merge pull request #1510 from lightpanda-io/wp/mrdimidium/ram-e2e-tests

Use cgroups for RAM measurement
This commit is contained in:
Pierre Tachoire
2026-02-12 11:55:02 +01:00
committed by GitHub

View File

@@ -122,10 +122,19 @@ jobs:
needs: zig-build-release needs: zig-build-release
env: env:
MAX_MEMORY: 26000 MAX_VmHWM: 26000 # 26MB (KB)
MAX_CG_PEAK: 6000 # 6MB (KB)
MAX_AVG_DURATION: 17 MAX_AVG_DURATION: 17
LIGHTPANDA_DISABLE_TELEMETRY: true LIGHTPANDA_DISABLE_TELEMETRY: true
# How to give cgroups access to the user actions-runner on the host:
# $ sudo apt install cgroup-tools
# $ sudo chmod o+w /sys/fs/cgroup/cgroup.procs
# $ sudo mkdir -p /sys/fs/cgroup/actions-runner
# $ sudo chown -R actions-runner:actions-runner /sys/fs/cgroup/actions-runner
CG_ROOT: /sys/fs/cgroup
CG: actions-runner/lpd_${{ github.run_id }}_${{ github.run_attempt }}
# use a self host runner. # use a self host runner.
runs-on: lpd-bench-hetzner runs-on: lpd-bench-hetzner
timeout-minutes: 15 timeout-minutes: 15
@@ -150,10 +159,20 @@ jobs:
go run ws/main.go & echo $! > WS.pid go run ws/main.go & echo $! > WS.pid
sleep 2 sleep 2
- name: run lightpanda in cgroup
run: |
if [ ! -f /sys/fs/cgroup/cgroup.controllers ]; then
echo "cgroup v2 not available: /sys/fs/cgroup/cgroup.controllers missing"
exit 1
fi
mkdir -p $CG_ROOT/$CG
cgexec -g memory:$CG ./lightpanda serve & echo $! > LPD.pid
sleep 2
- name: run puppeteer - name: run puppeteer
run: | run: |
./lightpanda serve & echo $! > LPD.pid
sleep 2
RUNS=100 npm run bench-puppeteer-cdp > puppeteer.out || exit 1 RUNS=100 npm run bench-puppeteer-cdp > puppeteer.out || exit 1
cat /proc/`cat LPD.pid`/status |grep VmHWM|grep -oP '\d+' > LPD.VmHWM cat /proc/`cat LPD.pid`/status |grep VmHWM|grep -oP '\d+' > LPD.VmHWM
kill `cat LPD.pid` kill `cat LPD.pid`
@@ -161,11 +180,30 @@ jobs:
- name: puppeteer result - name: puppeteer result
run: cat puppeteer.out run: cat puppeteer.out
- name: memory regression - name: cgroup memory regression
run: |
PID=$(cat LPD.pid)
while kill -0 $PID 2>/dev/null; do
sleep 1
done
if [ ! -f $CG_ROOT/$CG/memory.peak ]; then
echo "memory.peak not available in $CG"
exit 1
fi
PEAK_BYTES=$(cat $CG_ROOT/$CG/memory.peak)
PEAK_KB=$((PEAK_BYTES / 1024))
echo "memory.peak_bytes=$PEAK_BYTES"
echo "memory.peak_kb=$PEAK_KB"
test "$PEAK_KB" -le "$MAX_CG_PEAK"
- name: virtual memory regression
run: | run: |
export LPD_VmHWM=`cat LPD.VmHWM` export LPD_VmHWM=`cat LPD.VmHWM`
echo "Peak resident set size: $LPD_VmHWM" echo "Peak resident set size: $LPD_VmHWM"
test "$LPD_VmHWM" -le "$MAX_MEMORY" test "$LPD_VmHWM" -le "$MAX_VmHWM"
- name: cleanup cgroup
run: rmdir $CG_ROOT/$CG
- name: duration regression - name: duration regression
run: | run: |
@@ -178,7 +216,8 @@ jobs:
export AVG_DURATION=`cat puppeteer.out|grep 'avg run'|sed 's/avg run duration (ms) //'` export AVG_DURATION=`cat puppeteer.out|grep 'avg run'|sed 's/avg run duration (ms) //'`
export TOTAL_DURATION=`cat puppeteer.out|grep 'total duration'|sed 's/total duration (ms) //'` export TOTAL_DURATION=`cat puppeteer.out|grep 'total duration'|sed 's/total duration (ms) //'`
export LPD_VmHWM=`cat LPD.VmHWM` export LPD_VmHWM=`cat LPD.VmHWM`
echo "{\"duration_total\":${TOTAL_DURATION},\"duration_avg\":${AVG_DURATION},\"mem_peak\":${LPD_VmHWM}}" > bench.json export LPD_CG_PEAK_KB=$(( $(cat $CG_ROOT/$CG/memory.peak) / 1024 ))
echo "{\"duration_total\":${TOTAL_DURATION},\"duration_avg\":${AVG_DURATION},\"mem_peak\":${LPD_VmHWM},\"cg_mem_peak\":${LPD_CG_PEAK_KB}}" > bench.json
cat bench.json cat bench.json
- name: run hyperfine - name: run hyperfine