Cleanup benchmark builds a little bit (#691)

We don't need to run any cachegrind benchmarks in benchmark-dev anymore, since
the main benchmark workflow uses our new callgrind setup instead.

Also removes the prototyping path filters, which we no longer need, from all builds.
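The "new callgrind setup" referenced above lives in the separate benchmark workflow and is not part of this diff. As a hedged sketch only (paths and output names are illustrative, not the repository's actual configuration), a callgrind-based measurement of the benchmark script looks roughly like this:

# Illustrative sketch, not the repository's actual callgrind workflow.
valgrind --quiet --tool=callgrind --callgrind-out-file=callgrind.out \
    python ./bench/bench.py >/dev/null
# Summarize the recorded instruction counts per function.
callgrind_annotate callgrind.out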
Arseny Kapoulkine 2022-09-29 15:42:23 -07:00 committed by GitHub
parent 944e8375aa
commit 937ef2efd4
5 changed files with 0 additions and 313 deletions


@@ -10,7 +10,6 @@ on:
- "papers/**"
- "rfcs/**"
- "*.md"
- "prototyping/**"
jobs:
windows:
@@ -25,8 +24,6 @@ jobs:
script: "run-benchmarks",
timeout: 12,
title: "Luau Benchmarks",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
@@ -117,8 +114,6 @@ jobs:
script: "run-benchmarks",
timeout: 12,
title: "Luau Benchmarks",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
@@ -145,19 +140,6 @@ jobs:
run: |
python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
- name: Install valgrind
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get install valgrind
- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt
- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt
- name: Push benchmark results
id: pushBenchmarkAttempt1
continue-on-error: true
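For reference, the removed steps call the deleted helper with a fixed argument layout: interpreter, benchmark script, result title, iteration count, with any further arguments passed through to the benchmark itself. Substituting the matrix values visible above ("Performance", 20, run-benchmarks), the warm run is equivalent to:

# Standalone equivalent of the removed "Warm Cachegrind" step, with the matrix
# values substituted in.
sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "Performance" 20 | tee -a run-benchmarks-output.txt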
@@ -201,185 +183,3 @@ jobs:
bench_tool: "benchmarkluau"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push Cachegrind benchmark results
if: matrix.os == 'ubuntu-latest'
id: pushBenchmarkCachegrindAttempt1
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }} (CacheGrind)
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push Cachegrind benchmark results (Attempt 2)
if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt1.outcome == 'failure'
id: pushBenchmarkCachegrindAttempt2
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }} (CacheGrind)
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push Cachegrind benchmark results (Attempt 3)
if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt2.outcome == 'failure'
id: pushBenchmarkCachegrindAttempt3
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }} (CacheGrind)
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
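The three "Attempt" steps above form a simple retry ladder: each push is marked continue-on-error, and attempt N+1 only runs when the previous attempt's step reports outcome == 'failure'. A rough shell analogue of that control flow (push_results is a hypothetical stand-in for the push-results composite action):

# Rough analogue of the Attempt 1/2/3 retry pattern; push_results is hypothetical.
for attempt in 1 2 3; do
    if push_results; then
        break
    fi
    echo "push attempt ${attempt} failed" >&2
done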
static-analysis:
name: luau-analyze
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
bench:
- {
script: "run-analyze",
timeout: 12,
title: "Luau Analyze",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
with:
token: "${{ secrets.BENCH_GITHUB_TOKEN }}"
- name: Build Luau
run: make config=release luau luau-analyze
- uses: actions/setup-python@v4
with:
python-version: "3.9"
architecture: "x64"
- name: Install python dependencies
run: |
sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Install valgrind
run: |
sudo apt-get install valgrind
- name: Run Luau Analyze on static file
run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt
- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
run: sudo ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
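Given the argument handling in the deleted run-with-cachegrind.sh (shown at the end of this diff), the warm step above effectively runs the analyzer once under cachegrind and then post-processes the newest profile; roughly (any valgrind flags not visible in this diff are elided):

# What the helper effectively executes for the step above.
valgrind --quiet --tool=cachegrind \
    python ./bench/measure_time.py ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua >/dev/null
cg_annotate $(ls -t cachegrind.out.* | head -1)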
- name: Push static analysis results
id: pushStaticAnalysisAttempt1
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }}
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push static analysis results (Attempt 2)
if: steps.pushStaticAnalysisAttempt1.outcome == 'failure'
id: pushStaticAnalysisAttempt2
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }}
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push static analysis results (Attempt 3)
if: steps.pushStaticAnalysisAttempt2.outcome == 'failure'
id: pushStaticAnalysisAttempt3
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }}
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push static analysis Cachegrind results
if: matrix.os == 'ubuntu-latest'
id: pushStaticAnalysisCachegrindAttempt1
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }}
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push static analysis Cachegrind results (Attempt 2)
if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt1.outcome == 'failure'
id: pushStaticAnalysisCachegrindAttempt2
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }}
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"
- name: Push static analysis Cachegrind results (Attempt 3)
if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt2.outcome == 'failure'
id: pushStaticAnalysisCachegrindAttempt3
continue-on-error: true
uses: ./.github/workflows/push-results
with:
repository: ${{ matrix.benchResultsRepo.name }}
branch: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
bench_name: ${{ matrix.bench.title }}
bench_tool: "roblox"
bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"


@@ -9,7 +9,6 @@ on:
- "papers/**"
- "rfcs/**"
- "*.md"
- "prototyping/**"
jobs:
callgrind:


@@ -9,14 +9,12 @@ on:
- 'papers/**'
- 'rfcs/**'
- '*.md'
- 'prototyping/**'
pull_request:
paths-ignore:
- 'docs/**'
- 'papers/**'
- 'rfcs/**'
- '*.md'
- 'prototyping/**'
jobs:
unix:


@@ -9,7 +9,6 @@ on:
- 'papers/**'
- 'rfcs/**'
- '*.md'
- 'prototyping/**'
jobs:
build:


@@ -1,109 +0,0 @@
#!/bin/bash
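# Strict mode: abort on errors and unset variables, fail pipelines on any
# failing stage, and limit word splitting to newlines and tabs.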
set -euo pipefail
IFS=$'\n\t'
declare -A event_map
event_map[Ir]="TotalInstructionsExecuted,executions\n"
event_map[I1mr]="L1_InstrReadCacheMisses,misses/op\n"
event_map[ILmr]="LL_InstrReadCacheMisses,misses/op\n"
event_map[Dr]="TotalMemoryReads,reads\n"
event_map[D1mr]="L1_DataReadCacheMisses,misses/op\n"
event_map[DLmr]="LL_DataReadCacheMisses,misses/op\n"
event_map[Dw]="TotalMemoryWrites,writes\n"
event_map[D1mw]="L1_DataWriteCacheMisses,misses/op\n"
event_map[DLmw]="LL_DataWriteCacheMisses,misses/op\n"
event_map[Bc]="ConditionalBranchesExecuted,executions\n"
event_map[Bcm]="ConditionalBranchMispredictions,mispredictions/op\n"
event_map[Bi]="IndirectBranchesExecuted,executions\n"
event_map[Bim]="IndirectBranchMispredictions,mispredictions/op\n"
now_ms() {
echo -n $(date +%s%N | cut -b1-13)
}
# Run cachegrind on a given benchmark and echo the results.
ITERATION_COUNT=$4
START_TIME=$(now_ms)
ARGS=( "$@" )
REST_ARGS="${ARGS[@]:4}"
valgrind \
--quiet \
--tool=cachegrind \
"$1" "$2" $REST_ARGS>/dev/null
TIME_ELAPSED=$(bc <<< "$(now_ms) - ${START_TIME}")
# Generate report using cg_annotate and extract the header and totals of the
# recorded events valgrind was configured to record.
CG_RESULTS=$(cg_annotate $(ls -t cachegrind.out.* | head -1))
CG_HEADERS=$(grep -B2 'PROGRAM TOTALS$' <<< "$CG_RESULTS" | head -1 | sed -E 's/\s+/\n/g' | sed '/^$/d')
CG_TOTALS=$(grep 'PROGRAM TOTALS$' <<< "$CG_RESULTS" | head -1 | grep -Po '[0-9,]+\s' | tr -d ', ')
TOTALS_ARRAY=($CG_TOTALS)
HEADERS_ARRAY=($CG_HEADERS)
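# Map each cg_annotate column header (Ir, Dr, Bc, ...) to its column index so
# reference totals can be looked up by event name below.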
declare -A header_map
for i in "${!TOTALS_ARRAY[@]}"; do
header_map[${HEADERS_ARRAY[$i]}]=$i
done
# Map the results to the format that the benchmark script expects.
for i in "${!TOTALS_ARRAY[@]}"; do
TOTAL=${TOTALS_ARRAY[$i]}
# Labels and unit descriptions are packed together in the map.
EVENT_TUPLE=${event_map[${HEADERS_ARRAY[$i]}]}
IFS=$',' read -d '\n' -ra EVENT_VALUES < <(printf "%s" "$EVENT_TUPLE")
EVENT_NAME="${EVENT_VALUES[0]}"
UNIT="${EVENT_VALUES[1]}"
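# Miss and mispredict totals are divided by their reference totals (instruction
# reads, data reads/writes, branches executed) to report per-operation rates;
# absolute counters are reported as-is.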
case ${HEADERS_ARRAY[$i]} in
I1mr | ILmr)
REF=${TOTALS_ARRAY[header_map["Ir"]]}
OPS_PER_SEC=$(bc -l <<< "$TOTAL / $REF")
;;
D1mr | DLmr)
REF=${TOTALS_ARRAY[header_map["Dr"]]}
OPS_PER_SEC=$(bc -l <<< "$TOTAL / $REF")
;;
D1mw | DLmw)
REF=${TOTALS_ARRAY[header_map["Dw"]]}
OPS_PER_SEC=$(bc -l <<< "$TOTAL / $REF")
;;
Bcm)
REF=${TOTALS_ARRAY[header_map["Bc"]]}
OPS_PER_SEC=$(bc -l <<< "$TOTAL / $REF")
;;
Bim)
REF=${TOTALS_ARRAY[header_map["Bi"]]}
OPS_PER_SEC=$(bc -l <<< "$TOTAL / $REF")
;;
*)
OPS_PER_SEC=$(bc -l <<< "$TOTAL")
;;
esac
STD_DEV="0%"
RUNS="1"
if [[ $OPS_PER_SEC =~ ^[+-]?[0-9]*$ ]]
then # $OPS_PER_SEC is integer
printf "%s#%s x %.0f %s ±%s (%d runs sampled)\n" \
"$3" "$EVENT_NAME" "$OPS_PER_SEC" "$UNIT" "$STD_DEV" "$RUNS"
else # $OPS_PER_SEC is float
printf "%s#%s x %.10f %s ±%s (%d runs sampled)\n" \
"$3" "$EVENT_NAME" "$OPS_PER_SEC" "$UNIT" "$STD_DEV" "$RUNS"
fi
done
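Each iteration of this loop emitted one result line in the format the benchmark-data tooling consumes; an illustrative sample (values made up) matching the printf format above:

# Performance#L1_DataReadCacheMisses x 0.0123456789 misses/op ±0% (1 runs sampled)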