Merge branch 'master' into merge

Arseny Kapoulkine 2022-07-07 18:07:30 -07:00
commit f9e76fc75c
21 changed files with 2612 additions and 256 deletions

.github/workflows/benchmark-dev.yml (new file, 270 lines)

@@ -0,0 +1,270 @@
name: benchmark-dev
on:
push:
branches:
- master
paths-ignore:
- "docs/**"
- "papers/**"
- "rfcs/**"
- "*.md"
- "prototyping/**"
jobs:
windows:
name: windows-${{matrix.arch}}
strategy:
fail-fast: false
matrix:
os: [windows-latest]
arch: [Win32, x64]
bench:
- {
script: "run-benchmarks",
timeout: 12,
title: "Luau Benchmarks",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
runs-on: ${{ matrix.os }}
steps:
- name: Checkout Luau repository
uses: actions/checkout@v3
- name: Build Luau
shell: bash # necessary for fail-fast
run: |
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Release
cmake --build . --target Luau.Repl.CLI --config Release
cmake --build . --target Luau.Analyze.CLI --config Release
- name: Move build files to root
run: |
move build/Release/* .
- uses: actions/setup-python@v3
with:
python-version: "3.9"
architecture: "x64"
- name: Install python dependencies
run: |
python -m pip install requests
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Run benchmark
run: |
python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
- name: Checkout Benchmark Results repository
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
ref: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
- name: Store ${{ matrix.bench.title }} result
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }} (Windows ${{matrix.arch}})
tool: "benchmarkluau"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data-${{ matrix.os }}.json
git commit -m "Add benchmarks results for ${{ github.sha }}"
git push
cd ..
unix:
name: ${{matrix.os}}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
bench:
- {
script: "run-benchmarks",
timeout: 12,
title: "Luau Benchmarks",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
runs-on: ${{ matrix.os }}
steps:
- name: Checkout Luau repository
uses: actions/checkout@v3
- name: Build Luau
run: make config=release luau luau-analyze
- uses: actions/setup-python@v3
with:
python-version: "3.9"
architecture: "x64"
- name: Install python dependencies
run: |
python -m pip install requests
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Run benchmark
run: |
python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
- name: Install valgrind
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get install valgrind
- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt
- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt
- name: Checkout Benchmark Results repository
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
ref: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
- name: Store ${{ matrix.bench.title }} result
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
tool: "benchmarkluau"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
- name: Store ${{ matrix.bench.title }} result (CacheGrind)
if: matrix.os == 'ubuntu-latest'
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }} (CacheGrind)
tool: "roblox"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data-${{ matrix.os }}.json
git commit -m "Add benchmarks results for ${{ github.sha }}"
git push
cd ..
static-analysis:
name: luau-analyze
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
bench:
- {
script: "run-analyze",
timeout: 12,
title: "Luau Analyze",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
with:
token: "${{ secrets.BENCH_GITHUB_TOKEN }}"
- name: Build Luau
run: make config=release luau luau-analyze
- uses: actions/setup-python@v4
with:
python-version: "3.9"
architecture: "x64"
- name: Install python dependencies
run: |
sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Install valgrind
run: |
sudo apt-get install valgrind
- name: Run Luau Analyze on static file
run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt
- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
run: sudo ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/other/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
- name: Checkout Benchmark Results repository
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
ref: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
- name: Store ${{ matrix.bench.title }} result
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
tool: "benchmarkluau"
gh-pages-branch: "main"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
- name: Store ${{ matrix.bench.title }} result (CacheGrind)
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
tool: "roblox"
gh-pages-branch: "main"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data-${{ matrix.os }}.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data-${{ matrix.os }}.json
git commit -m "Add benchmarks results for ${{ github.sha }}"
git push
cd ..
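
For reference, the Store steps above feed the captured *-output.txt files to the benchmarkluau parser, which expects one result line per benchmark. Judging from the filter() helpers added to the callgrind workflow later in this commit, each line looks roughly like this (illustrative name and numbers):

SUCCESS: map-nonstrict : 152.34ms +/- 0% on luau-analyze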


@@ -12,21 +12,13 @@ on:
- "prototyping/**"
jobs:
windows:
name: windows-${{matrix.arch}}
callgrind:
name: callgrind ${{ matrix.compiler }}
strategy:
fail-fast: false
matrix:
os: [windows-latest]
arch: [Win32, x64]
bench:
- {
script: "run-benchmarks",
timeout: 12,
title: "Luau Benchmarks",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
os: [ubuntu-22.04]
compiler: [g++]
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
@@ -35,200 +27,40 @@ jobs:
- name: Checkout Luau repository
uses: actions/checkout@v3
- name: Build Luau
shell: bash # necessary for fail-fast
run: |
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Release
cmake --build . --target Luau.Repl.CLI --config Release
cmake --build . --target Luau.Analyze.CLI --config Release
- name: Move build files to root
run: |
move build/Release/* .
- uses: actions/setup-python@v3
with:
python-version: "3.9"
architecture: "x64"
- name: Install python dependencies
run: |
python -m pip install requests
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Run benchmark
run: |
python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
- name: Checkout Benchmark Results repository
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
ref: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
- name: Store ${{ matrix.bench.title }} result
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }} (Windows ${{matrix.arch}})
tool: "benchmarkluau"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data.json
git commit -m "Add benchmarks results for ${{ github.sha }}"
git push
cd ..
unix:
name: ${{matrix.os}}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
bench:
- {
script: "run-benchmarks",
timeout: 12,
title: "Luau Benchmarks",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
runs-on: ${{ matrix.os }}
steps:
- name: Checkout Luau repository
uses: actions/checkout@v3
- name: Build Luau
run: make config=release luau luau-analyze
- uses: actions/setup-python@v3
with:
python-version: "3.9"
architecture: "x64"
- name: Install python dependencies
run: |
python -m pip install requests
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Run benchmark
run: |
python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
- name: Install valgrind
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get install valgrind
- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt
- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt
- name: Checkout Benchmark Results repository
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
ref: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
- name: Store ${{ matrix.bench.title }} result
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
tool: "benchmarkluau"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
- name: Store ${{ matrix.bench.title }} result (CacheGrind)
if: matrix.os == 'ubuntu-latest'
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }} (CacheGrind)
tool: "roblox"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data.json
git commit -m "Add benchmarks results for ${{ github.sha }}"
git push
cd ..
static-analysis:
name: luau-analyze
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
bench:
- {
script: "run-analyze",
timeout: 12,
title: "Luau Analyze",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
with:
token: "${{ secrets.BENCH_GITHUB_TOKEN }}"
- name: Build Luau
run: make config=release luau luau-analyze
- uses: actions/setup-python@v4
with:
python-version: "3.9"
architecture: "x64"
- name: Install python dependencies
run: |
sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Install valgrind
run: |
sudo apt-get install valgrind
- name: Run Luau Analyze on static file
run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt
- name: Build Luau
run: CXX=${{ matrix.compiler }} make config=release CALLGRIND=1 luau luau-analyze
- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
run: sudo ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
- name: Run benchmark (bench)
run: |
python bench/bench.py --callgrind --vm "./luau -O2" | tee -a bench-output.txt
- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt
- name: Run benchmark (analyze)
run: |
filter() {
awk '/.*I\s+refs:\s+[0-9,]+/ {gsub(",", "", $4); X=$4} END {print "SUCCESS: '$1' : " X/1e7 "ms +/- 0% on luau-analyze"}'
}
valgrind --tool=callgrind ./luau-analyze --mode=nonstrict bench/other/LuauPolyfillMap.lua 2>&1 | filter map-nonstrict | tee -a analyze-output.txt
valgrind --tool=callgrind ./luau-analyze --mode=strict bench/other/LuauPolyfillMap.lua 2>&1 | filter map-strict | tee -a analyze-output.txt
valgrind --tool=callgrind ./luau-analyze --mode=nonstrict bench/other/regex.lua 2>&1 | filter regex-nonstrict | tee -a analyze-output.txt
valgrind --tool=callgrind ./luau-analyze --mode=strict bench/other/regex.lua 2>&1 | filter regex-strict | tee -a analyze-output.txt
- name: Checkout Benchmark Results repository
- name: Run benchmark (compile)
run: |
filter() {
awk '/.*I\s+refs:\s+[0-9,]+/ {gsub(",", "", $4); X=$4} END {print "SUCCESS: '$1' : " X/1e7 "ms +/- 0% on luau --compile"}'
}
valgrind --tool=callgrind ./luau --compile=null -O0 bench/other/LuauPolyfillMap.lua 2>&1 | filter map-O0 | tee -a compile-output.txt
valgrind --tool=callgrind ./luau --compile=null -O1 bench/other/LuauPolyfillMap.lua 2>&1 | filter map-O1 | tee -a compile-output.txt
valgrind --tool=callgrind ./luau --compile=null -O2 bench/other/LuauPolyfillMap.lua 2>&1 | filter map-O2 | tee -a compile-output.txt
valgrind --tool=callgrind ./luau --compile=null -O0 bench/other/regex.lua 2>&1 | filter regex-O0 | tee -a compile-output.txt
valgrind --tool=callgrind ./luau --compile=null -O1 bench/other/regex.lua 2>&1 | filter regex-O1 | tee -a compile-output.txt
valgrind --tool=callgrind ./luau --compile=null -O2 bench/other/regex.lua 2>&1 | filter regex-O2 | tee -a compile-output.txt
- name: Checkout benchmark results
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
@@ -236,26 +68,29 @@ jobs:
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"
- name: Store ${{ matrix.bench.title }} result
- name: Store results (bench)
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
name: callgrind ${{ matrix.compiler }}
tool: "benchmarkluau"
output-file-path: ./bench-output.txt
external-data-json-path: ./gh-pages/bench.json
gh-pages-branch: "main"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
- name: Store ${{ matrix.bench.title }} result (CacheGrind)
- name: Store results (analyze)
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
tool: "roblox"
gh-pages-branch: "main"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
github-token: ${{ secrets.BENCH_GITHUB_TOKEN }}
name: luau-analyze
tool: "benchmarkluau"
output-file-path: ./analyze-output.txt
external-data-json-path: ./gh-pages/analyze.json
- name: Store results (compile)
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: luau --compile
tool: "benchmarkluau"
output-file-path: ./compile-output.txt
external-data-json-path: ./gh-pages/compile.json
- name: Push benchmark results
if: github.event_name == 'push'
@@ -264,7 +99,7 @@ jobs:
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data.json
git add *.json
git commit -m "Add benchmarks results for ${{ github.sha }}"
git push
cd ..
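
A note on the filter() helpers above: on exit, valgrind --tool=callgrind prints an instruction-fetch summary line such as "==12345== I   refs:   1,234,567,890" (PID and count illustrative). The awk program matches that line, strips the commas from the count in field 4, and divides by 1e7: at the assumed 2.5 IPC on a 4 GHz CPU (see the comment in bench/bench.py below), 1e10 instructions execute per second, so 1e7 instructions correspond to one simulated millisecond. The result is printed as a SUCCESS: line in the same format the benchmarkluau tool ingests.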


@@ -345,6 +345,7 @@ private:
TypePackId freshTypePack(TypeLevel level);
TypeId resolveType(const ScopePtr& scope, const AstType& annotation);
TypeId resolveTypeWorker(const ScopePtr& scope, const AstType& annotation);
TypePackId resolveTypePack(const ScopePtr& scope, const AstTypeList& types);
TypePackId resolveTypePack(const ScopePtr& scope, const AstTypePack& annotation);
TypeId instantiateTypeFun(const ScopePtr& scope, const TypeFun& tf, const std::vector<TypeId>& typeParams,


@@ -496,6 +496,8 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
module->astTypes.clear();
module->astExpectedTypes.clear();
module->astOriginalCallTypes.clear();
module->astResolvedTypes.clear();
module->astResolvedTypePacks.clear();
module->scopes.resize(1);
}


@@ -700,6 +700,12 @@ struct TypeVarStringifier
void operator()(TypeId, const MetatableTypeVar& mtv)
{
state.result.invalid = true;
if (!state.exhaustive && mtv.syntheticName)
{
state.emit(*mtv.syntheticName);
return;
}
state.emit("{ @metatable ");
stringify(mtv.metatable);
state.emit(",");

View File

@@ -205,20 +205,6 @@ struct Printer
}
}
void visualizeWithSelf(AstExpr& expr, bool self)
{
if (!self)
return visualize(expr);
AstExprIndexName* func = expr.as<AstExprIndexName>();
LUAU_ASSERT(func);
visualize(*func->expr);
writer.symbol(":");
advance(func->indexLocation.begin);
writer.identifier(func->index.value);
}
void visualizeTypePackAnnotation(const AstTypePack& annotation, bool forVarArg)
{
advance(annotation.location.begin);
@@ -366,7 +352,7 @@ struct Printer
}
else if (const auto& a = expr.as<AstExprCall>())
{
visualizeWithSelf(*a->func, a->self);
visualize(*a->func);
writer.symbol("(");
bool first = true;
@@ -385,7 +371,7 @@
else if (const auto& a = expr.as<AstExprIndexName>())
{
visualize(*a->expr);
writer.symbol(".");
writer.symbol(std::string(1, a->op));
writer.write(a->index.value);
}
else if (const auto& a = expr.as<AstExprIndexExpr>())
@@ -766,7 +752,7 @@ struct Printer
else if (const auto& a = program.as<AstStatFunction>())
{
writer.keyword("function");
visualizeWithSelf(*a->name, a->func->self != nullptr);
visualize(*a->name);
visualizeFunctionBody(*a->func);
}
else if (const auto& a = program.as<AstStatLocalFunction>())


@@ -4884,6 +4884,13 @@ TypePackId TypeChecker::freshTypePack(TypeLevel level)
}
TypeId TypeChecker::resolveType(const ScopePtr& scope, const AstType& annotation)
{
TypeId ty = resolveTypeWorker(scope, annotation);
currentModule->astResolvedTypes[&annotation] = ty;
return ty;
}
TypeId TypeChecker::resolveTypeWorker(const ScopePtr& scope, const AstType& annotation)
{
if (const auto& lit = annotation.as<AstTypeReference>())
{
@@ -5200,9 +5207,10 @@ TypePackId TypeChecker::resolveTypePack(const ScopePtr& scope, const AstTypeList
TypePackId TypeChecker::resolveTypePack(const ScopePtr& scope, const AstTypePack& annotation)
{
TypePackId result;
if (const AstTypePackVariadic* variadic = annotation.as<AstTypePackVariadic>())
{
return addTypePack(TypePackVar{VariadicTypePack{resolveType(scope, *variadic->variadicType)}});
result = addTypePack(TypePackVar{VariadicTypePack{resolveType(scope, *variadic->variadicType)}});
}
else if (const AstTypePackGeneric* generic = annotation.as<AstTypePackGeneric>())
{
@@ -5216,10 +5224,12 @@ TypePackId TypeChecker::resolveTypePack(const ScopePtr& scope, const AstTypePack
else
reportError(TypeError{generic->location, UnknownSymbol{genericName, UnknownSymbol::Type}});
return errorRecoveryTypePack(scope);
result = errorRecoveryTypePack(scope);
}
else
{
result = *genericTy;
}
return *genericTy;
}
else if (const AstTypePackExplicit* explicitTp = annotation.as<AstTypePackExplicit>())
{
@@ -5229,14 +5239,17 @@ TypePackId TypeChecker::resolveTypePack(const ScopePtr& scope, const AstTypePack
types.push_back(resolveType(scope, *type));
if (auto tailType = explicitTp->typeList.tailType)
return addTypePack(types, resolveTypePack(scope, *tailType));
return addTypePack(types);
result = addTypePack(types, resolveTypePack(scope, *tailType));
else
result = addTypePack(types);
}
else
{
ice("Unknown AstTypePack kind");
}
currentModule->astResolvedTypePacks[&annotation] = result;
return result;
}
bool ApplyTypeFunction::isDirty(TypeId ty)
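
Taken together, resolveType now caches every resolved annotation in astResolvedTypes, and resolveTypePack funnels all branches through a single result that lands in astResolvedTypePacks. A minimal consumer sketch (hypothetical tooling code; it assumes a checked Luau::Module and an AstType* annotation in hand, mirroring the lookup in the new test at the end of this diff):

// Map an annotation back to the type it resolved to during checking.
if (const Luau::TypeId* ty = module->astResolvedTypes.find(annotation))
    printf("annotation resolved to %s\n", Luau::toString(*ty).c_str());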


@@ -8,6 +8,10 @@
#include "FileUtils.h"
#ifdef CALLGRIND
#include <valgrind/callgrind.h>
#endif
LUAU_FASTFLAG(DebugLuauTimeTracing)
LUAU_FASTFLAG(LuauTypeMismatchModuleNameResolution)
@@ -112,6 +116,7 @@ static void displayHelp(const char* argv0)
printf("Available options:\n");
printf(" --formatter=plain: report analysis errors in Luacheck-compatible format\n");
printf(" --formatter=gnu: report analysis errors in GNU-compatible format\n");
printf(" --mode=strict: default to strict mode when typechecking\n");
printf(" --timetrace: record compiler time tracing information into trace.json\n");
}
@@ -178,9 +183,9 @@ struct CliConfigResolver : Luau::ConfigResolver
mutable std::unordered_map<std::string, Luau::Config> configCache;
mutable std::vector<std::pair<std::string, std::string>> configErrors;
CliConfigResolver()
CliConfigResolver(Luau::Mode mode)
{
defaultConfig.mode = Luau::Mode::Nonstrict;
defaultConfig.mode = mode;
}
const Luau::Config& getConfig(const Luau::ModuleName& name) const override
@@ -229,6 +234,7 @@ int main(int argc, char** argv)
}
ReportFormat format = ReportFormat::Default;
Luau::Mode mode = Luau::Mode::Nonstrict;
bool annotate = false;
for (int i = 1; i < argc; ++i)
@@ -240,6 +246,8 @@
format = ReportFormat::Luacheck;
else if (strcmp(argv[i], "--formatter=gnu") == 0)
format = ReportFormat::Gnu;
else if (strcmp(argv[i], "--mode=strict") == 0)
mode = Luau::Mode::Strict;
else if (strcmp(argv[i], "--annotate") == 0)
annotate = true;
else if (strcmp(argv[i], "--timetrace") == 0)
@@ -258,12 +266,16 @@
frontendOptions.retainFullTypeGraphs = annotate;
CliFileResolver fileResolver;
CliConfigResolver configResolver;
CliConfigResolver configResolver(mode);
Luau::Frontend frontend(&fileResolver, &configResolver, frontendOptions);
Luau::registerBuiltinTypes(frontend.typeChecker);
Luau::freeze(frontend.typeChecker.globalTypes);
#ifdef CALLGRIND
CALLGRIND_ZERO_STATS;
#endif
std::vector<std::string> files = getSourceFiles(argc, argv);
int failed = 0;
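
The new flag changes the default checking mode (per-file configuration can still override it); the callgrind workflow above exercises both settings explicitly, e.g.:

./luau-analyze --mode=nonstrict bench/other/LuauPolyfillMap.lua
./luau-analyze --mode=strict bench/other/LuauPolyfillMap.lua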


@@ -21,6 +21,10 @@
#include <fcntl.h>
#endif
#ifdef CALLGRIND
#include <valgrind/callgrind.h>
#endif
#include <locale.h>
LUAU_FASTFLAG(DebugLuauTimeTracing)
@@ -166,6 +170,36 @@ static int lua_collectgarbage(lua_State* L)
luaL_error(L, "collectgarbage must be called with 'count' or 'collect'");
}
#ifdef CALLGRIND
static int lua_callgrind(lua_State* L)
{
const char* option = luaL_checkstring(L, 1);
if (strcmp(option, "running") == 0)
{
int r = RUNNING_ON_VALGRIND;
lua_pushboolean(L, r);
return 1;
}
if (strcmp(option, "zero") == 0)
{
CALLGRIND_ZERO_STATS;
return 0;
}
if (strcmp(option, "dump") == 0)
{
const char* name = luaL_checkstring(L, 2);
CALLGRIND_DUMP_STATS_AT(name);
return 0;
}
luaL_error(L, "callgrind must be called with one of 'running', 'zero', 'dump'");
}
#endif
void setupState(lua_State* L)
{
luaL_openlibs(L);
@@ -174,6 +208,9 @@ void setupState(lua_State* L)
{"loadstring", lua_loadstring},
{"require", lua_require},
{"collectgarbage", lua_collectgarbage},
#ifdef CALLGRIND
{"callgrind", lua_callgrind},
#endif
{NULL, NULL},
};


@@ -93,6 +93,10 @@ ifeq ($(config),fuzz)
LDFLAGS+=-fsanitize=address,fuzzer
endif
ifneq ($(CALLGRIND),)
CXXFLAGS+=-DCALLGRIND=$(CALLGRIND)
endif
# target-specific flags
$(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include
$(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include
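
To reproduce the instrumented CI build locally, the callgrind workflow's build step amounts to:

CXX=g++ make config=release CALLGRIND=1 luau luau-analyze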


@@ -40,6 +40,7 @@ argumentParser.add_argument('--results', dest='results',type=str,nargs='*',help=
argumentParser.add_argument('--run-test', action='store', default=None, help='Regex test filter')
argumentParser.add_argument('--extra-loops', action='store',type=int,default=0, help='Amount of times to loop over one test (one test already performs multiple runs)')
argumentParser.add_argument('--filename', action='store',type=str,default='bench', help='File name for graph and results file')
argumentParser.add_argument('--callgrind', dest='callgrind',action='store_const',const=1,default=0,help='Use callgrind to run benchmarks')
if matplotlib != None:
argumentParser.add_argument('--absolute', dest='absolute',action='store_const',const=1,default=0,help='Display absolute values instead of relative (enabled by default when benchmarking a single VM)')
@@ -55,6 +56,9 @@ argumentParser.add_argument('--no-print-influx-debugging', action='store_false',
argumentParser.add_argument('--no-print-final-summary', action='store_false', dest='print_final_summary', help="Don't print a table summarizing the results after all tests are run")
# Assume 2.5 IPC on a 4 GHz CPU; this is obviously incorrect but it allows us to display simulated instruction counts using regular time units
CALLGRIND_INSN_PER_SEC = 2.5 * 4e9
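# (2.5 IPC * 4e9 Hz = 1e10 simulated instructions per second, i.e. 1e7 instructions per reported millisecond)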
def arrayRange(count):
result = []
@@ -71,6 +75,21 @@ def arrayRangeOffset(count, offset):
return result
def getCallgrindOutput(lines):
result = []
name = None
for l in lines:
if l.startswith("desc: Trigger: Client Request: "):
name = l[31:].strip()
elif l.startswith("summary: ") and name != None:
insn = int(l[9:])
# Note: we only run each bench once under callgrind so we only report a single time per run; callgrind instruction count variance is ~0.01% so it might as well be zero
result += "|><|" + name + "|><|" + str(insn / CALLGRIND_INSN_PER_SEC * 1000.0) + "||_||"
name = None
return "".join(result)
def getVmOutput(cmd):
if os.name == "nt":
try:
@@ -79,6 +98,14 @@ def getVmOutput(cmd):
exit(1)
except:
return ""
elif arguments.callgrind:
try:
subprocess.check_call("valgrind --tool=callgrind --callgrind-out-file=callgrind.out --combine-dumps=yes --dump-line=no " + cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, cwd=scriptdir)
file = open(os.path.join(scriptdir, "callgrind.out"), "r")
lines = file.readlines()
return getCallgrindOutput(lines)
except:
return ""
else:
with subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, cwd=scriptdir) as p:
# Try to lock to a single processor
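
With the flag wired through getVmOutput, the callgrind workflow's benchmark step is simply:

python bench/bench.py --callgrind --vm "./luau -O2" | tee -a bench-output.txt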


@@ -5,6 +5,16 @@ bench.runs = 20
bench.extraRuns = 4
function bench.runCode(f, description)
-- Under Callgrind, run the test only once and measure just the execution cost
if callgrind and callgrind("running") then
if collectgarbage then collectgarbage() end
callgrind("zero")
f() -- unfortunately we can't easily separate setup cost from runtime cost in f unless it calls callgrind()
callgrind("dump", description)
return
end
local timeTable = {}
for i = 1,bench.runs + bench.extraRuns do


@@ -1,5 +1,4 @@
-- This file is part of the Roblox luau-polyfill repository and is licensed under MIT License; see LICENSE.txt for details
--!nonstrict
-- #region Array
-- Array related
local Array = {}

bench/other/regex.lua (new file, 2089 lines)

File diff suppressed because it is too large.


@@ -54,7 +54,7 @@ Sandboxing challenges are [covered in the dedicated section](sandbox).
| goto statement | ❌ | this complicates the compiler, makes control flow unstructured and doesn't address a significant need |
| finalizers for tables | ❌ | no `__gc` support due to sandboxing and performance/complexity |
| no more fenv for threads or functions | 😞 | we love this, but it breaks compatibility |
| tables honor the `__len` metamethod | 🤷‍♀️ | performance implications, no strong use cases
| tables honor the `__len` metamethod | ✔️ | |
| hex and `\z` escapes in strings | ✔️ | |
| support for hexadecimal floats | 🤷‍♀️ | no strong use cases |
| order metamethods work for different types | ❌ | no strong use cases and more complicated semantics, compatibility and performance implications |


@@ -104,15 +104,15 @@ From the type checker perspective, each table can be in one of three states. The
### Unsealed tables
An unsealed table is a table whose properties could still be tacked on. This occurs when the table constructor literal had zero expressions. This is one way to accumulate knowledge of the shape of this table.
An unsealed table is a table which supports adding new properties, which updates the table's type. Unsealed tables are created using table literals. This is one way to accumulate knowledge of the shape of this table.
```lua
local t = {} -- {}
t.x = 1 -- {x: number}
t.y = 2 -- {x: number, y: number}
local t = {x = 1} -- {x: number}
t.y = 2 -- {x: number, y: number}
t.z = 3 -- {x: number, y: number, z: number}
```
However, if this local were written as `local t: {} = {}`, it ends up sealing the table, so the two assignments henceforth will not be ok.
However, if this local were written as `local t: { x: number } = { x = 1 }`, it ends up sealing the table, so the two assignments henceforth will not be ok.
Furthermore, once we exit the scope where this unsealed table was created in, we seal it.
@@ -128,16 +128,25 @@ local v2 = vec2(1, 2)
v2.z = 3 -- not ok
```
### Sealed tables
A sealed table is a table that is now locked down. This occurs when the table constructor literal had 1 or more expression, or when the table type is spelt out explicitly via a type annotation.
Unsealed tables are *exact* in that any property of the table must be named by the type. Since Luau treats missing properties as having value `nil`, this means that we can treat an unsealed table which does not mention a property as if it mentioned the property, as long as that property is optional.
```lua
local t = {x = 1} -- {x: number}
t.y = 2 -- not ok
local t = {x = 1}
local u : { x : number, y : number? } = t -- ok because y is optional
local v : { x : number, z : number } = t -- not ok because z is not optional
```
Sealed tables support *width subtyping*, which allows a table with more properties to be used as a table with fewer
### Sealed tables
A sealed table is a table that is now locked down. This occurs when the table type is spelled out explicitly via a type annotation, or if it is returned from a function.
```lua
local t : { x: number } = {x = 1}
t.y = 2 -- not ok
```
Sealed tables are *inexact* in that the table may have properties which are not mentioned in the type.
As a result, sealed tables support *width subtyping*, which allows a table with more properties to be used as a table with fewer properties:
```lua
type Point1D = { x : number }


@@ -791,6 +791,8 @@ TEST_CASE_FIXTURE(FrontendFixture, "discard_type_graphs")
CHECK_EQ(0, module->internalTypes.typeVars.size());
CHECK_EQ(0, module->internalTypes.typePacks.size());
CHECK_EQ(0, module->astTypes.size());
CHECK_EQ(0, module->astResolvedTypes.size());
CHECK_EQ(0, module->astResolvedTypePacks.size());
}
TEST_CASE_FIXTURE(FrontendFixture, "it_should_be_safe_to_stringify_errors_when_full_type_graph_is_discarded")


@@ -96,6 +96,37 @@ TEST_CASE_FIXTURE(Fixture, "table_respects_use_line_break")
//clang-format on
}
TEST_CASE_FIXTURE(Fixture, "metatable")
{
TypeVar table{TypeVariant(TableTypeVar())};
TypeVar metatable{TypeVariant(TableTypeVar())};
TypeVar mtv{TypeVariant(MetatableTypeVar{&table, &metatable})};
CHECK_EQ("{ @metatable { }, { } }", toString(&mtv));
}
TEST_CASE_FIXTURE(Fixture, "named_metatable")
{
TypeVar table{TypeVariant(TableTypeVar())};
TypeVar metatable{TypeVariant(TableTypeVar())};
TypeVar mtv{TypeVariant(MetatableTypeVar{&table, &metatable, "NamedMetatable"})};
CHECK_EQ("NamedMetatable", toString(&mtv));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "named_metatable_toStringNamedFunction")
{
CheckResult result = check(R"(
local function createTbl(): NamedMetatable
return setmetatable({}, {})
end
type NamedMetatable = typeof(createTbl())
)");
TypeId ty = requireType("createTbl");
const FunctionTypeVar* ftv = get<FunctionTypeVar>(follow(ty));
REQUIRE(ftv);
CHECK_EQ("createTbl(): NamedMetatable", toStringNamedFunction("createTbl", *ftv));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "exhaustive_toString_of_cyclic_table")
{
CheckResult result = check(R"(


@@ -583,7 +583,7 @@ TEST_CASE_FIXTURE(Fixture, "transpile_error_expr")
auto names = AstNameTable{allocator};
ParseResult parseResult = Parser::parse(code.data(), code.size(), names, allocator, {});
CHECK_EQ("local a = (error-expr: f.%error-id%)-(error-expr)", transpileWithTypes(*parseResult.root));
CHECK_EQ("local a = (error-expr: f:%error-id%)-(error-expr)", transpileWithTypes(*parseResult.root));
}
TEST_CASE_FIXTURE(Fixture, "transpile_error_stat")


@@ -518,7 +518,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "greedy_inference_with_shared_self_triggers_f
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Not all codepaths in this function return '{ @metatable T, {| |} }, a...'.", toString(result.errors[0]));
CHECK_EQ("Not all codepaths in this function return 'self, a...'.", toString(result.errors[0]));
}
TEST_SUITE_END();


@@ -1003,4 +1003,27 @@ TEST_CASE_FIXTURE(Fixture, "do_not_bind_a_free_table_to_a_union_containing_that_
)");
}
TEST_CASE_FIXTURE(Fixture, "types stored in astResolvedTypes")
{
CheckResult result = check(R"(
type alias = typeof("hello")
local function foo(param: alias)
end
)");
auto node = findNodeAtPosition(*getMainSourceModule(), {2, 16});
auto ty = lookupType("alias");
REQUIRE(node);
REQUIRE(node->is<AstExprFunction>());
REQUIRE(ty);
auto func = node->as<AstExprFunction>();
REQUIRE(func->args.size == 1);
auto arg = *func->args.begin();
auto annotation = arg->annotation;
CHECK_EQ(*getMainModule()->astResolvedTypes.find(annotation), *ty);
}
TEST_SUITE_END();