# luau/.github/workflows/benchmark-dev.yml

name: benchmark-dev

on:
  push:
    branches:
      - master
    paths-ignore:
      - "docs/**"
      - "papers/**"
      - "rfcs/**"
      - "*.md"
      - "prototyping/**"

jobs:
  windows:
    name: windows-${{matrix.arch}}
    strategy:
      fail-fast: false
      matrix:
        os: [windows-latest]
        arch: [Win32, x64]
        bench:
          - {
              script: "run-benchmarks",
              timeout: 12,
              title: "Luau Benchmarks",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Luau repository
        uses: actions/checkout@v3

      - name: Build Luau
        shell: bash # necessary for fail-fast
        run: |
          mkdir build && cd build
          cmake .. -DCMAKE_BUILD_TYPE=Release
          cmake --build . --target Luau.Repl.CLI --config Release
          cmake --build . --target Luau.Analyze.CLI --config Release

      - name: Move build files to root
        run: |
          move build/Release/* .

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Run benchmark
        run: |
          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
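
      # Publishing the results can fail transiently (for example when another job is
      # pushing to the same branch at that moment), so the push is retried up to
      # three times below.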

      - name: Push benchmark results
        id: pushBenchmarkAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 2)
        id: pushBenchmarkAttempt2
        continue-on-error: true
        if: steps.pushBenchmarkAttempt1.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 3)
        id: pushBenchmarkAttempt3
        continue-on-error: true
        if: steps.pushBenchmarkAttempt2.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: "${{ matrix.bench.title }} (Windows ${{matrix.arch}})"
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

  unix:
    name: ${{matrix.os}}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
        bench:
          - {
              script: "run-benchmarks",
              timeout: 12,
              title: "Luau Benchmarks",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Luau repository
        uses: actions/checkout@v3

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Run benchmark
        run: |
          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt

      - name: Install valgrind
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get install valgrind
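
      # Cachegrind runs are Linux-only: a cold pass (1 iteration) followed by a warm
      # pass (cachegrindIterCount iterations), both appended to the benchmark output file.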

      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
        if: matrix.os == 'ubuntu-latest'
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
        if: matrix.os == 'ubuntu-latest'
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt

      - name: Push benchmark results
        id: pushBenchmarkAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 2)
        id: pushBenchmarkAttempt2
        continue-on-error: true
        if: steps.pushBenchmarkAttempt1.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push benchmark results (Attempt 3)
        id: pushBenchmarkAttempt3
        continue-on-error: true
        if: steps.pushBenchmarkAttempt2.outcome == 'failure'
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "benchmarkluau"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push Cachegrind benchmark results
        if: matrix.os == 'ubuntu-latest'
        id: pushBenchmarkCachegrindAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }} (CacheGrind)
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push Cachegrind benchmark results (Attempt 2)
        if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt1.outcome == 'failure'
        id: pushBenchmarkCachegrindAttempt2
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }} (CacheGrind)
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push Cachegrind benchmark results (Attempt 3)
        if: matrix.os == 'ubuntu-latest' && steps.pushBenchmarkCachegrindAttempt2.outcome == 'failure'
        id: pushBenchmarkCachegrindAttempt3
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }} (CacheGrind)
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

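  # Measures luau-analyze on a fixed input file (bench/other/LuauPolyfillMap.lua),
  # with the same cold/warm Cachegrind passes and three-attempt result publishing.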
  static-analysis:
    name: luau-analyze
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        bench:
          - {
              script: "run-analyze",
              timeout: 12,
              title: "Luau Analyze",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
        with:
          token: "${{ secrets.BENCH_GITHUB_TOKEN }}"

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v4
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Install valgrind
        run: |
          sudo apt-get install valgrind

      - name: Run Luau Analyze on static file
        run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

      - name: Push static analysis results
        id: pushStaticAnalysisAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push static analysis results (Attempt 2)
        if: steps.pushStaticAnalysisAttempt1.outcome == 'failure'
        id: pushStaticAnalysisAttempt2
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push static analysis results (Attempt 3)
        if: steps.pushStaticAnalysisAttempt2.outcome == 'failure'
        id: pushStaticAnalysisAttempt3
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push static analysis Cachegrind results
        if: matrix.os == 'ubuntu-latest'
        id: pushStaticAnalysisCachegrindAttempt1
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push static analysis Cachegrind results (Attempt 2)
        if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt1.outcome == 'failure'
        id: pushStaticAnalysisCachegrindAttempt2
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"

      - name: Push static analysis Cachegrind results (Attempt 3)
        if: matrix.os == 'ubuntu-latest' && steps.pushStaticAnalysisCachegrindAttempt2.outcome == 'failure'
        id: pushStaticAnalysisCachegrindAttempt3
        continue-on-error: true
        uses: ./.github/workflows/push-results
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          branch: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"
          bench_name: ${{ matrix.bench.title }}
          bench_tool: "roblox"
          bench_output_file_path: "./${{ matrix.bench.script }}-output.txt"
          bench_external_data_json_path: "./gh-pages/dev/bench/data-${{ matrix.os }}.json"