Sync to upstream/release/549 (#707)

* Reoptimized math.min/max/bit32 builtins assuming at least 2 arguments are used (1-2% lift on some benchmarks)
* Type errors that mention function types no longer have redundant parentheses around the return type (illustrated in the snippet after this list)
* Luau REPL now supports --compile=remarks, which displays the source code with optimization remarks embedded as comments
* Builtin calls are slightly faster when called with 1-2 arguments (~1% improvement in some benchmarks)
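A minimal Luau sketch of the builtin and error-formatting items above (my own illustration, not code from this commit):

    -- Hypothetical helper: math.min/math.max each receive exactly two arguments,
    -- which is the call shape the reoptimized builtin fast path targets.
    local function clamp(x: number): number
        return math.max(0, math.min(x, 255))
    end
    -- Per the second item, a type error mentioning this function now prints its
    -- type as '(number) -> number' rather than '(number) -> (number)'.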
Authored by vegorov-rbx on 2022-10-14 12:48:41 -07:00; committed by GitHub
parent ff736fd3e4
commit 76070f8da2
104 changed files with 4099 additions and 2223 deletions


@ -76,8 +76,8 @@ struct ConstraintSolver
DcrLogger* logger;
explicit ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, ModuleName moduleName,
NotNull<ModuleResolver> moduleResolver, std::vector<RequireCycle> requireCycles, DcrLogger* logger);
explicit ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, ModuleName moduleName, NotNull<ModuleResolver> moduleResolver,
std::vector<RequireCycle> requireCycles, DcrLogger* logger);
// Randomize the order in which to dispatch constraints
void randomize(unsigned seed);
@ -88,7 +88,9 @@ struct ConstraintSolver
**/
void run();
bool done();
bool isDone();
void finalizeModule();
/** Attempt to dispatch a constraint. Returns true if it was successful. If
* tryDispatch() returns false, the constraint remains in the unsolved set


@ -112,11 +112,13 @@ struct DcrLogger
void popBlock(NotNull<const Constraint> block);
void captureInitialSolverState(const Scope* rootScope, const std::vector<NotNull<const Constraint>>& unsolvedConstraints);
StepSnapshot prepareStepSnapshot(const Scope* rootScope, NotNull<const Constraint> current, bool force, const std::vector<NotNull<const Constraint>>& unsolvedConstraints);
StepSnapshot prepareStepSnapshot(
const Scope* rootScope, NotNull<const Constraint> current, bool force, const std::vector<NotNull<const Constraint>>& unsolvedConstraints);
void commitStepSnapshot(StepSnapshot snapshot);
void captureFinalSolverState(const Scope* rootScope, const std::vector<NotNull<const Constraint>>& unsolvedConstraints);
void captureTypeCheckError(const TypeError& error);
private:
ConstraintGenerationLog generationLog;
std::unordered_map<NotNull<const Constraint>, std::vector<ConstraintBlockTarget>> constraintBlocks;


@ -33,7 +33,6 @@ struct UnknownSymbol
{
Binding,
Type,
Generic
};
Name name;
Context context;


@ -240,7 +240,7 @@ void write(JsonEmitter& emitter, const std::unordered_map<std::string, T>& map)
for (const auto& [k, v] : map)
o.writePair(k, v);
o.finish();
}


@ -17,8 +17,10 @@ struct SingletonTypes;
using ModulePtr = std::shared_ptr<Module>;
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true);
bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true);
bool isSubtype(
TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true);
bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice,
bool anyIsTop = true);
std::pair<TypeId, bool> normalize(
TypeId ty, NotNull<Scope> scope, TypeArena& arena, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice);
@ -68,13 +70,14 @@ public:
insert(*it);
}
bool operator ==(const TypeIds& there) const;
bool operator==(const TypeIds& there) const;
size_t getHash() const;
};
} // namespace Luau
template<> struct std::hash<Luau::TypeIds>
template<>
struct std::hash<Luau::TypeIds>
{
std::size_t operator()(const Luau::TypeIds& tys) const
{
@ -82,7 +85,8 @@ template<> struct std::hash<Luau::TypeIds>
}
};
template<> struct std::hash<const Luau::TypeIds*>
template<>
struct std::hash<const Luau::TypeIds*>
{
std::size_t operator()(const Luau::TypeIds* tys) const
{
@ -90,7 +94,8 @@ template<> struct std::hash<const Luau::TypeIds*>
}
};
template<> struct std::equal_to<Luau::TypeIds>
template<>
struct std::equal_to<Luau::TypeIds>
{
bool operator()(const Luau::TypeIds& here, const Luau::TypeIds& there) const
{
@ -98,7 +103,8 @@ template<> struct std::equal_to<Luau::TypeIds>
}
};
template<> struct std::equal_to<const Luau::TypeIds*>
template<>
struct std::equal_to<const Luau::TypeIds*>
{
bool operator()(const Luau::TypeIds* here, const Luau::TypeIds* there) const
{
@ -160,7 +166,7 @@ struct NormalizedType
// The string part of the type.
// This may be the `string` type, or a union of singletons.
NormalizedStringType strings = std::map<std::string,TypeId>{};
NormalizedStringType strings = std::map<std::string, TypeId>{};
// The thread part of the type.
// This type is either never or thread.


@ -397,7 +397,7 @@ private:
std::vector<std::pair<TypeId, ScopePtr>> deferredQuantification;
};
using PrintLineProc = void(*)(const std::string&);
using PrintLineProc = void (*)(const std::string&);
extern PrintLineProc luauPrintLine;


@ -61,15 +61,15 @@ struct Unifier
ErrorVec errors;
Location location;
Variance variance = Covariant;
bool anyIsTop = false; // If true, we consider any to be a top type. If false, it is a familiar but weird mix of top and bottom all at once.
bool normalize; // Normalize unions and intersections if necessary
bool anyIsTop = false; // If true, we consider any to be a top type. If false, it is a familiar but weird mix of top and bottom all at once.
bool normalize; // Normalize unions and intersections if necessary
bool useScopes = false; // If true, we use the scope hierarchy rather than TypeLevels
CountMismatch::Context ctx = CountMismatch::Arg;
UnifierSharedState& sharedState;
Unifier(NotNull<Normalizer> normalizer, Mode mode, NotNull<Scope> scope, const Location& location, Variance variance,
TxnLog* parentLog = nullptr);
Unifier(
NotNull<Normalizer> normalizer, Mode mode, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog = nullptr);
// Test whether the two type vars unify. Never commits the result.
ErrorVec canUnify(TypeId subTy, TypeId superTy);
@ -87,7 +87,8 @@ private:
void tryUnifyTypeWithUnion(TypeId subTy, TypeId superTy, const UnionTypeVar* uv, bool cacheEnabled, bool isFunctionCall);
void tryUnifyTypeWithIntersection(TypeId subTy, TypeId superTy, const IntersectionTypeVar* uv);
void tryUnifyIntersectionWithType(TypeId subTy, const IntersectionTypeVar* uv, TypeId superTy, bool cacheEnabled, bool isFunctionCall);
void tryUnifyNormalizedTypes(TypeId subTy, TypeId superTy, const NormalizedType& subNorm, const NormalizedType& superNorm, std::string reason, std::optional<TypeError> error = std::nullopt);
void tryUnifyNormalizedTypes(TypeId subTy, TypeId superTy, const NormalizedType& subNorm, const NormalizedType& superNorm, std::string reason,
std::optional<TypeError> error = std::nullopt);
void tryUnifyPrimitives(TypeId subTy, TypeId superTy);
void tryUnifySingletons(TypeId subTy, TypeId superTy);
void tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCall = false);


@ -497,7 +497,7 @@ static bool dcrMagicFunctionSelect(MagicFunctionCallContext context)
asMutable(context.result)->ty.emplace<BoundTypePack>(resTypePack);
}
else if (tail)
asMutable(context.result)->ty.emplace<BoundTypePack>(*tail);
asMutable(context.result)->ty.emplace<BoundTypePack>(*tail);
return true;
}
@ -507,7 +507,8 @@ static bool dcrMagicFunctionSelect(MagicFunctionCallContext context)
if (AstExprConstantString* str = arg1->as<AstExprConstantString>())
{
if (str->value.size == 1 && str->value.data[0] == '#') {
if (str->value.size == 1 && str->value.data[0] == '#')
{
TypePackId numberTypePack = context.solver->arena->addTypePack({context.solver->singletonTypes->numberType});
asMutable(context.result)->ty.emplace<BoundTypePack>(numberTypePack);
return true;


@ -43,7 +43,7 @@ static bool matchSetmetatable(const AstExprCall& call)
if (call.args.size != 2)
return false;
const AstExprGlobal* funcAsGlobal = call.func->as<AstExprGlobal>();
if (!funcAsGlobal || funcAsGlobal->name != smt)
return false;
@ -52,7 +52,8 @@ static bool matchSetmetatable(const AstExprCall& call)
}
ConstraintGraphBuilder::ConstraintGraphBuilder(const ModuleName& moduleName, ModulePtr module, TypeArena* arena,
NotNull<ModuleResolver> moduleResolver, NotNull<SingletonTypes> singletonTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope, DcrLogger* logger)
NotNull<ModuleResolver> moduleResolver, NotNull<SingletonTypes> singletonTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope,
DcrLogger* logger)
: moduleName(moduleName)
, module(module)
, singletonTypes(singletonTypes)


@ -14,8 +14,6 @@
#include "Luau/VisitTypeVar.h"
#include "Luau/TypeUtils.h"
#include <random>
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false);
LUAU_FASTFLAG(LuauFixNameMaps)
@ -283,13 +281,27 @@ ConstraintSolver::ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope
void ConstraintSolver::randomize(unsigned seed)
{
std::mt19937 g(seed);
std::shuffle(begin(unsolvedConstraints), end(unsolvedConstraints), g);
if (unsolvedConstraints.empty())
return;
unsigned int rng = seed;
for (size_t i = unsolvedConstraints.size() - 1; i > 0; --i)
{
// Fisher-Yates shuffle
size_t j = rng % (i + 1);
std::swap(unsolvedConstraints[i], unsolvedConstraints[j]);
// LCG RNG, constants from Numerical Recipes
// This may occasionally result in skewed shuffles due to distribution properties, but this is a debugging tool so it should be good enough
rng = rng * 1664525 + 1013904223;
}
}
void ConstraintSolver::run()
{
if (done())
if (isDone())
return;
if (FFlag::DebugLuauLogSolver)
@ -364,6 +376,8 @@ void ConstraintSolver::run()
progress |= runSolverPass(true);
} while (progress);
finalizeModule();
if (FFlag::DebugLuauLogSolver)
{
dumpBindings(rootScope, opts);
@ -375,11 +389,24 @@ void ConstraintSolver::run()
}
}
bool ConstraintSolver::done()
bool ConstraintSolver::isDone()
{
return unsolvedConstraints.empty();
}
void ConstraintSolver::finalizeModule()
{
Anyification a{arena, rootScope, singletonTypes, &iceReporter, singletonTypes->anyType, singletonTypes->anyTypePack};
std::optional<TypePackId> returnType = a.substitute(rootScope->returnType);
if (!returnType)
{
reportError(CodeTooComplex{}, Location{});
rootScope->returnType = singletonTypes->errorTypePack;
}
else
rootScope->returnType = *returnType;
}
bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool force)
{
if (!force && isBlocked(constraint))
@ -506,25 +533,25 @@ bool ConstraintSolver::tryDispatch(const UnaryConstraint& c, NotNull<const Const
switch (c.op)
{
case AstExprUnary::Not:
case AstExprUnary::Not:
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(singletonTypes->booleanType);
return true;
}
case AstExprUnary::Len:
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(singletonTypes->numberType);
return true;
}
case AstExprUnary::Minus:
{
if (isNumber(operandType) || get<AnyTypeVar>(operandType) || get<ErrorTypeVar>(operandType))
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(singletonTypes->booleanType);
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(c.operandType);
return true;
}
case AstExprUnary::Len:
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(singletonTypes->numberType);
return true;
}
case AstExprUnary::Minus:
{
if (isNumber(operandType) || get<AnyTypeVar>(operandType) || get<ErrorTypeVar>(operandType))
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(c.operandType);
return true;
}
break;
}
break;
}
}
LUAU_ASSERT(false); // TODO metatable handling
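The randomize() hunk above replaces std::shuffle with an explicit Fisher-Yates pass driven by a linear congruential generator (constants from Numerical Recipes); a likely motivation is keeping the order independent of the standard library's std::shuffle implementation, though the diff does not say so. A sketch of the same pattern, written in Luau purely for illustration (not part of this diff):

    -- Illustrative only: a Fisher-Yates shuffle driven by an LCG, mirroring randomize().
    local function lcgShuffle(items: {any}, seed: number)
        local rng = seed
        for i = #items, 2, -1 do
            local j = rng % i + 1 -- pick j in 1..i; the modulo bias is tolerated, as the diff notes
            items[i], items[j] = items[j], items[i]
            rng = (rng * 1664525 + 1013904223) % 2^32 -- LCG step, Numerical Recipes constants
        end
    end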


@ -57,7 +57,7 @@ void write(JsonEmitter& emitter, const ConstraintGenerationLog& log)
emitter.writeRaw(":");
ObjectEmitter locationEmitter = emitter.writeObject();
for (const auto& [id, location] : log.constraintLocations)
{
locationEmitter.writePair(id, location);
@ -232,7 +232,7 @@ void DcrLogger::captureSource(std::string source)
void DcrLogger::captureGenerationError(const TypeError& error)
{
std::string stringifiedError = toString(error);
generationLog.errors.push_back(ErrorSnapshot {
generationLog.errors.push_back(ErrorSnapshot{
/* message */ stringifiedError,
/* location */ error.location,
});
@ -298,7 +298,8 @@ void DcrLogger::captureInitialSolverState(const Scope* rootScope, const std::vec
}
}
StepSnapshot DcrLogger::prepareStepSnapshot(const Scope* rootScope, NotNull<const Constraint> current, bool force, const std::vector<NotNull<const Constraint>>& unsolvedConstraints)
StepSnapshot DcrLogger::prepareStepSnapshot(
const Scope* rootScope, NotNull<const Constraint> current, bool force, const std::vector<NotNull<const Constraint>>& unsolvedConstraints)
{
ScopeSnapshot scopeSnapshot = snapshotScope(rootScope, opts);
std::string currentId = toPointerId(current);
@ -344,7 +345,7 @@ void DcrLogger::captureFinalSolverState(const Scope* rootScope, const std::vecto
void DcrLogger::captureTypeCheckError(const TypeError& error)
{
std::string stringifiedError = toString(error);
checkLog.errors.push_back(ErrorSnapshot {
checkLog.errors.push_back(ErrorSnapshot{
/* message */ stringifiedError,
/* location */ error.location,
});
@ -359,7 +360,7 @@ std::vector<ConstraintBlock> DcrLogger::snapshotBlocks(NotNull<const Constraint>
}
std::vector<ConstraintBlock> snapshot;
for (const ConstraintBlockTarget& target : it->second)
{
if (const TypeId* ty = get_if<TypeId>(&target))


@ -8,7 +8,6 @@
#include <stdexcept>
LUAU_FASTFLAGVARIABLE(LuauTypeMismatchModuleNameResolution, false)
LUAU_FASTFLAGVARIABLE(LuauUseInternalCompilerErrorException, false)
static std::string wrongNumberOfArgsString(
size_t expectedCount, std::optional<size_t> maximumCount, size_t actualCount, const char* argPrefix = nullptr, bool isVariadic = false)
@ -122,8 +121,6 @@ struct ErrorConverter
return "Unknown global '" + e.name + "'";
case UnknownSymbol::Type:
return "Unknown type '" + e.name + "'";
case UnknownSymbol::Generic:
return "Unknown generic '" + e.name + "'";
}
LUAU_ASSERT(!"Unexpected context for UnknownSymbol");
@ -902,46 +899,22 @@ void copyErrors(ErrorVec& errors, TypeArena& destArena)
void InternalErrorReporter::ice(const std::string& message, const Location& location)
{
if (FFlag::LuauUseInternalCompilerErrorException)
{
InternalCompilerError error(message, moduleName, location);
InternalCompilerError error(message, moduleName, location);
if (onInternalError)
onInternalError(error.what());
if (onInternalError)
onInternalError(error.what());
throw error;
}
else
{
std::runtime_error error("Internal error in " + moduleName + " at " + toString(location) + ": " + message);
if (onInternalError)
onInternalError(error.what());
throw error;
}
throw error;
}
void InternalErrorReporter::ice(const std::string& message)
{
if (FFlag::LuauUseInternalCompilerErrorException)
{
InternalCompilerError error(message, moduleName);
InternalCompilerError error(message, moduleName);
if (onInternalError)
onInternalError(error.what());
if (onInternalError)
onInternalError(error.what());
throw error;
}
else
{
std::runtime_error error("Internal error in " + moduleName + ": " + message);
if (onInternalError)
onInternalError(error.what());
throw error;
}
throw error;
}
const char* InternalCompilerError::what() const throw()


@ -14,7 +14,6 @@
LUAU_FASTINTVARIABLE(LuauSuggestionDistance, 4)
LUAU_FASTFLAGVARIABLE(LuauLintGlobalNeverReadBeforeWritten, false)
LUAU_FASTFLAGVARIABLE(LuauLintFixDeprecationMessage, false)
namespace Luau
{
@ -307,22 +306,11 @@ private:
emitWarning(*context, LintWarning::Code_UnknownGlobal, gv->location, "Unknown global '%s'", gv->name.value);
else if (g->deprecated)
{
if (FFlag::LuauLintFixDeprecationMessage)
{
if (const char* replacement = *g->deprecated; replacement && strlen(replacement))
emitWarning(*context, LintWarning::Code_DeprecatedGlobal, gv->location, "Global '%s' is deprecated, use '%s' instead",
gv->name.value, replacement);
else
emitWarning(*context, LintWarning::Code_DeprecatedGlobal, gv->location, "Global '%s' is deprecated", gv->name.value);
}
if (const char* replacement = *g->deprecated; replacement && strlen(replacement))
emitWarning(*context, LintWarning::Code_DeprecatedGlobal, gv->location, "Global '%s' is deprecated, use '%s' instead",
gv->name.value, replacement);
else
{
if (*g->deprecated)
emitWarning(*context, LintWarning::Code_DeprecatedGlobal, gv->location, "Global '%s' is deprecated, use '%s' instead",
gv->name.value, *g->deprecated);
else
emitWarning(*context, LintWarning::Code_DeprecatedGlobal, gv->location, "Global '%s' is deprecated", gv->name.value);
}
emitWarning(*context, LintWarning::Code_DeprecatedGlobal, gv->location, "Global '%s' is deprecated", gv->name.value);
}
}


@ -15,7 +15,6 @@
#include <algorithm>
LUAU_FASTFLAG(LuauAnyifyModuleReturnGenerics)
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAGVARIABLE(LuauForceExportSurfacesToBeNormal, false);
LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess, false);
@ -244,19 +243,6 @@ void Module::clonePublicInterface(NotNull<SingletonTypes> singletonTypes, Intern
ForceNormal forceNormal{&interfaceTypes};
if (FFlag::LuauLowerBoundsCalculation)
{
normalize(returnType, NotNull{this}, singletonTypes, ice);
if (FFlag::LuauForceExportSurfacesToBeNormal)
forceNormal.traverse(returnType);
if (varargPack)
{
normalize(*varargPack, NotNull{this}, singletonTypes, ice);
if (FFlag::LuauForceExportSurfacesToBeNormal)
forceNormal.traverse(*varargPack);
}
}
if (exportedTypeBindings)
{
for (auto& [name, tf] : *exportedTypeBindings)
@ -265,24 +251,6 @@ void Module::clonePublicInterface(NotNull<SingletonTypes> singletonTypes, Intern
tf = clonePublicInterface.cloneTypeFun(tf);
else
tf = clone(tf, interfaceTypes, cloneState);
if (FFlag::LuauLowerBoundsCalculation)
{
normalize(tf.type, NotNull{this}, singletonTypes, ice);
// We're about to freeze the memory. We know that the flag is conservative by design. Cyclic tables
// won't be marked normal. If the types aren't normal by now, they never will be.
forceNormal.traverse(tf.type);
for (GenericTypeDefinition param : tf.typeParams)
{
forceNormal.traverse(param.ty);
if (param.defaultValue)
{
normalize(*param.defaultValue, NotNull{this}, singletonTypes, ice);
forceNormal.traverse(*param.defaultValue);
}
}
}
}
}
@ -305,13 +273,6 @@ void Module::clonePublicInterface(NotNull<SingletonTypes> singletonTypes, Intern
ty = clonePublicInterface.cloneType(ty);
else
ty = clone(ty, interfaceTypes, cloneState);
if (FFlag::LuauLowerBoundsCalculation)
{
normalize(ty, NotNull{this}, singletonTypes, ice);
if (FFlag::LuauForceExportSurfacesToBeNormal)
forceNormal.traverse(ty);
}
}
freeze(internalTypes);


@ -119,17 +119,9 @@ NormalizedType::NormalizedType(NotNull<SingletonTypes> singletonTypes)
static bool isInhabited(const NormalizedType& norm)
{
return !get<NeverTypeVar>(norm.tops)
|| !get<NeverTypeVar>(norm.booleans)
|| !norm.classes.empty()
|| !get<NeverTypeVar>(norm.errors)
|| !get<NeverTypeVar>(norm.nils)
|| !get<NeverTypeVar>(norm.numbers)
|| !norm.strings || !norm.strings->empty()
|| !get<NeverTypeVar>(norm.threads)
|| norm.functions
|| !norm.tables.empty()
|| !norm.tyvars.empty();
return !get<NeverTypeVar>(norm.tops) || !get<NeverTypeVar>(norm.booleans) || !norm.classes.empty() || !get<NeverTypeVar>(norm.errors) ||
!get<NeverTypeVar>(norm.nils) || !get<NeverTypeVar>(norm.numbers) || !norm.strings || !norm.strings->empty() ||
!get<NeverTypeVar>(norm.threads) || norm.functions || !norm.tables.empty() || !norm.tyvars.empty();
}
static int tyvarIndex(TypeId ty)
@ -139,7 +131,7 @@ static int tyvarIndex(TypeId ty)
else if (const FreeTypeVar* ftv = get<FreeTypeVar>(ty))
return ftv->index;
else
return 0;
return 0;
}
#ifdef LUAU_ASSERTENABLED
@ -193,7 +185,7 @@ static bool isNormalizedString(const NormalizedStringType& ty)
{
if (!ty)
return true;
for (auto& [str, ty] : *ty)
{
if (const SingletonTypeVar* stv = get<SingletonTypeVar>(ty))
@ -272,24 +264,24 @@ static bool isNormalizedTyvar(const NormalizedTyvars& tyvars)
static void assertInvariant(const NormalizedType& norm)
{
#ifdef LUAU_ASSERTENABLED
if (!FFlag::DebugLuauCheckNormalizeInvariant)
return;
#ifdef LUAU_ASSERTENABLED
if (!FFlag::DebugLuauCheckNormalizeInvariant)
return;
LUAU_ASSERT(isNormalizedTop(norm.tops));
LUAU_ASSERT(isNormalizedBoolean(norm.booleans));
LUAU_ASSERT(areNormalizedClasses(norm.classes));
LUAU_ASSERT(isNormalizedError(norm.errors));
LUAU_ASSERT(isNormalizedNil(norm.nils));
LUAU_ASSERT(isNormalizedNumber(norm.numbers));
LUAU_ASSERT(isNormalizedString(norm.strings));
LUAU_ASSERT(isNormalizedThread(norm.threads));
LUAU_ASSERT(areNormalizedFunctions(norm.functions));
LUAU_ASSERT(areNormalizedTables(norm.tables));
LUAU_ASSERT(isNormalizedTyvar(norm.tyvars));
for (auto& [_, child] : norm.tyvars)
assertInvariant(*child);
#endif
LUAU_ASSERT(isNormalizedTop(norm.tops));
LUAU_ASSERT(isNormalizedBoolean(norm.booleans));
LUAU_ASSERT(areNormalizedClasses(norm.classes));
LUAU_ASSERT(isNormalizedError(norm.errors));
LUAU_ASSERT(isNormalizedNil(norm.nils));
LUAU_ASSERT(isNormalizedNumber(norm.numbers));
LUAU_ASSERT(isNormalizedString(norm.strings));
LUAU_ASSERT(isNormalizedThread(norm.threads));
LUAU_ASSERT(areNormalizedFunctions(norm.functions));
LUAU_ASSERT(areNormalizedTables(norm.tables));
LUAU_ASSERT(isNormalizedTyvar(norm.tyvars));
for (auto& [_, child] : norm.tyvars)
assertInvariant(*child);
#endif
}
Normalizer::Normalizer(TypeArena* arena, NotNull<SingletonTypes> singletonTypes, NotNull<UnifierSharedState> sharedState)
@ -359,7 +351,7 @@ TypeId Normalizer::unionType(TypeId here, TypeId there)
return there;
if (get<NeverTypeVar>(there) || get<AnyTypeVar>(here))
return here;
TypeIds tmps;
if (const UnionTypeVar* utv = get<UnionTypeVar>(here))
@ -405,7 +397,7 @@ TypeId Normalizer::intersectionType(TypeId here, TypeId there)
return here;
if (get<NeverTypeVar>(there) || get<AnyTypeVar>(here))
return there;
TypeIds tmps;
if (const IntersectionTypeVar* utv = get<IntersectionTypeVar>(here))
@ -516,13 +508,13 @@ std::optional<TypePackId> Normalizer::unionOfTypePacks(TypePackId here, TypePack
std::vector<TypeId> head;
std::optional<TypePackId> tail;
bool hereSubThere = true;
bool thereSubHere = true;
TypePackIterator ith = begin(here);
TypePackIterator itt = begin(there);
while (ith != end(here) && itt != end(there))
{
TypeId hty = *ith;
@ -537,8 +529,8 @@ std::optional<TypePackId> Normalizer::unionOfTypePacks(TypePackId here, TypePack
itt++;
}
auto dealWithDifferentArities = [&](TypePackIterator& ith, TypePackIterator itt, TypePackId here, TypePackId there, bool& hereSubThere, bool& thereSubHere)
{
auto dealWithDifferentArities = [&](TypePackIterator& ith, TypePackIterator itt, TypePackId here, TypePackId there, bool& hereSubThere,
bool& thereSubHere) {
if (ith != end(here))
{
TypeId tty = singletonTypes->nilType;
@ -591,13 +583,13 @@ std::optional<TypePackId> Normalizer::unionOfTypePacks(TypePackId here, TypePack
if (ty != tvtp->ty)
hereSubThere = false;
bool hidden = hvtp->hidden & tvtp->hidden;
tail = arena->addTypePack(VariadicTypePack{ty,hidden});
tail = arena->addTypePack(VariadicTypePack{ty, hidden});
}
else
else
// Luau doesn't have unions of type pack variables
return std::nullopt;
}
else
else
// Luau doesn't have unions of type pack variables
return std::nullopt;
}
@ -627,7 +619,7 @@ std::optional<TypePackId> Normalizer::unionOfTypePacks(TypePackId here, TypePack
else if (thereSubHere)
return here;
if (!head.empty())
return arena->addTypePack(TypePack{head,tail});
return arena->addTypePack(TypePack{head, tail});
else if (tail)
return *tail;
else
@ -639,10 +631,10 @@ std::optional<TypeId> Normalizer::unionOfFunctions(TypeId here, TypeId there)
{
if (get<ErrorTypeVar>(here))
return here;
if (get<ErrorTypeVar>(there))
return there;
const FunctionTypeVar* hftv = get<FunctionTypeVar>(here);
LUAU_ASSERT(hftv);
const FunctionTypeVar* tftv = get<FunctionTypeVar>(there);
@ -665,7 +657,7 @@ std::optional<TypeId> Normalizer::unionOfFunctions(TypeId here, TypeId there)
return here;
if (*argTypes == tftv->argTypes && *retTypes == tftv->retTypes)
return there;
FunctionTypeVar result{*argTypes, *retTypes};
result.generics = hftv->generics;
result.genericPacks = hftv->genericPacks;
@ -802,9 +794,9 @@ bool Normalizer::withinResourceLimits()
// Check the recursion count
if (sharedState->counters.recursionLimit > 0)
if (sharedState->counters.recursionLimit < sharedState->counters.recursionCount)
return false;
if (sharedState->counters.recursionLimit < sharedState->counters.recursionCount)
return false;
return true;
}
@ -1000,13 +992,13 @@ std::optional<TypePackId> Normalizer::intersectionOfTypePacks(TypePackId here, T
std::vector<TypeId> head;
std::optional<TypePackId> tail;
bool hereSubThere = true;
bool thereSubHere = true;
TypePackIterator ith = begin(here);
TypePackIterator itt = begin(there);
while (ith != end(here) && itt != end(there))
{
TypeId hty = *ith;
@ -1021,8 +1013,8 @@ std::optional<TypePackId> Normalizer::intersectionOfTypePacks(TypePackId here, T
itt++;
}
auto dealWithDifferentArities = [&](TypePackIterator& ith, TypePackIterator itt, TypePackId here, TypePackId there, bool& hereSubThere, bool& thereSubHere)
{
auto dealWithDifferentArities = [&](TypePackIterator& ith, TypePackIterator itt, TypePackId here, TypePackId there, bool& hereSubThere,
bool& thereSubHere) {
if (ith != end(here))
{
TypeId tty = singletonTypes->nilType;
@ -1075,13 +1067,13 @@ std::optional<TypePackId> Normalizer::intersectionOfTypePacks(TypePackId here, T
if (ty != tvtp->ty)
hereSubThere = false;
bool hidden = hvtp->hidden & tvtp->hidden;
tail = arena->addTypePack(VariadicTypePack{ty,hidden});
tail = arena->addTypePack(VariadicTypePack{ty, hidden});
}
else
else
// Luau doesn't have unions of type pack variables
return std::nullopt;
}
else
else
// Luau doesn't have unions of type pack variables
return std::nullopt;
}
@ -1105,7 +1097,7 @@ std::optional<TypePackId> Normalizer::intersectionOfTypePacks(TypePackId here, T
else if (thereSubHere)
return there;
if (!head.empty())
return arena->addTypePack(TypePack{head,tail});
return arena->addTypePack(TypePack{head, tail});
else if (tail)
return *tail;
else
@ -1146,7 +1138,7 @@ std::optional<TypeId> Normalizer::intersectionOfTables(TypeId here, TypeId there
return std::nullopt;
if (httv->state == TableState::Generic || tttv->state == TableState::Generic)
return std::nullopt;
TableState state = httv->state;
if (tttv->state == TableState::Unsealed)
state = tttv->state;
@ -1226,21 +1218,20 @@ std::optional<TypeId> Normalizer::intersectionOfTables(TypeId here, TypeId there
}
else
return std::nullopt;
}
else if (hmtable)
{
if (table == htable)
return here;
else
return arena->addType(MetatableTypeVar{table, hmtable});
return arena->addType(MetatableTypeVar{table, hmtable});
}
else if (tmtable)
{
if (table == ttable)
return there;
else
return arena->addType(MetatableTypeVar{table, tmtable});
return arena->addType(MetatableTypeVar{table, tmtable});
}
else
return table;
@ -1280,7 +1271,7 @@ std::optional<TypeId> Normalizer::intersectionOfFunctions(TypeId here, TypeId th
return std::nullopt;
if (hftv->retTypes != tftv->retTypes)
return std::nullopt;
std::optional<TypePackId> argTypes = unionOfTypePacks(hftv->argTypes, tftv->argTypes);
if (!argTypes)
return std::nullopt;
@ -1289,7 +1280,7 @@ std::optional<TypeId> Normalizer::intersectionOfFunctions(TypeId here, TypeId th
return here;
if (*argTypes == tftv->argTypes)
return there;
FunctionTypeVar result{*argTypes, hftv->retTypes};
result.generics = hftv->generics;
result.genericPacks = hftv->genericPacks;
@ -1299,7 +1290,7 @@ std::optional<TypeId> Normalizer::intersectionOfFunctions(TypeId here, TypeId th
std::optional<TypeId> Normalizer::unionSaturatedFunctions(TypeId here, TypeId there)
{
// Deep breath...
//
//
// When we come to check overloaded functions for subtyping,
// we have to compare (F1 & ... & FM) <: (G1 & ... & GN)
// where each Fi or Gj is a function type. Now that intersection on the right is no
@ -1319,12 +1310,12 @@ std::optional<TypeId> Normalizer::unionSaturatedFunctions(TypeId here, TypeId th
//
// So subtyping on overloaded functions "just" boils down to defining Apply<F, T>.
//
// Now for non-overloaded functions, this is easy!
// Now for non-overloaded functions, this is easy!
// Apply<(R -> S), T> is S if T <: R, and an error type otherwise.
//
// But for overloaded functions it's not so simple. We'd like Apply<F1 & ... & FM, T>
// to just be Apply<F1, T> & ... & Apply<FM, T> but oh dear
//
//
// if f : ((number -> number) & (string -> string))
// and x : (number | string)
// then f(x) : (number | string)
@ -1334,7 +1325,7 @@ std::optional<TypeId> Normalizer::unionSaturatedFunctions(TypeId here, TypeId th
// Apply<((number -> number) & (string -> string)), (number | string)> is (number | string)
//
// but
//
//
// Apply<(number -> number), (number | string)> is an error
// Apply<(string -> string), (number | string)> is an error
//
@ -1382,7 +1373,7 @@ std::optional<TypeId> Normalizer::unionSaturatedFunctions(TypeId here, TypeId th
// Covariance and Contravariance, Giuseppe Castagna,
// Logical Methods in Computer Science 16(1), 2022
// https://arxiv.org/abs/1809.01427
//
//
// A gentle introduction to semantic subtyping, Giuseppe Castagna and Alain Frisch,
// Proc. Principles and practice of declarative programming 2005, pp 198-208
// https://doi.org/10.1145/1069774.1069793
@ -1398,7 +1389,7 @@ std::optional<TypeId> Normalizer::unionSaturatedFunctions(TypeId here, TypeId th
return std::nullopt;
if (hftv->genericPacks != tftv->genericPacks)
return std::nullopt;
std::optional<TypePackId> argTypes = unionOfTypePacks(hftv->argTypes, tftv->argTypes);
if (!argTypes)
return std::nullopt;
@ -1416,7 +1407,7 @@ void Normalizer::intersectFunctionsWithFunction(NormalizedFunctionType& heres, T
{
if (!heres)
return;
for (auto it = heres->begin(); it != heres->end();)
{
TypeId here = *it;
@ -1450,7 +1441,7 @@ void Normalizer::intersectFunctions(NormalizedFunctionType& heres, const Normali
{
heres = std::nullopt;
return;
}
}
else
{
for (TypeId there : *theres)
@ -1530,7 +1521,7 @@ bool Normalizer::intersectNormals(NormalizedType& here, const NormalizedType& th
if (isInhabited(inter))
it++;
else
it = here.tyvars.erase(it);
it = here.tyvars.erase(it);
}
return true;
}
@ -1757,7 +1748,8 @@ bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<Singl
return ok;
}
bool isSubtype(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop)
bool isSubtype(
TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop)
{
UnifierSharedState sharedState{&ice};
TypeArena arena;
@ -2377,4 +2369,3 @@ std::pair<TypePackId, bool> normalize(TypePackId tp, const ModulePtr& module, No
}
} // namespace Luau


@ -9,7 +9,6 @@
#include <stdexcept>
LUAU_FASTFLAGVARIABLE(LuauSubstitutionFixMissingFields, false)
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
LUAU_FASTFLAG(LuauClonePublicInterfaceLess)
LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 10000)
LUAU_FASTFLAGVARIABLE(LuauClassTypeVarsInSubstitution, false)
@ -553,9 +552,6 @@ TypePackId Substitution::replace(TypePackId tp)
void Substitution::replaceChildren(TypeId ty)
{
if (BoundTypeVar* btv = log->getMutable<BoundTypeVar>(ty); FFlag::LuauLowerBoundsCalculation && btv)
btv->boundTo = replace(btv->boundTo);
LUAU_ASSERT(ty == log->follow(ty));
if (ignoreChildren(ty))


@ -10,11 +10,11 @@
#include <algorithm>
#include <stdexcept>
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAGVARIABLE(LuauSpecialTypesAsterisked, false)
LUAU_FASTFLAGVARIABLE(LuauFixNameMaps, false)
LUAU_FASTFLAGVARIABLE(LuauUnseeArrayTtv, false)
LUAU_FASTFLAGVARIABLE(LuauFunctionReturnStringificationFixup, false)
/*
* Prefix generic typenames with gen-
@ -524,7 +524,7 @@ struct TypeVarStringifier
bool plural = true;
if (FFlag::LuauLowerBoundsCalculation)
if (FFlag::LuauFunctionReturnStringificationFixup)
{
auto retBegin = begin(ftv.retTypes);
auto retEnd = end(ftv.retTypes);


@ -24,7 +24,7 @@ namespace Luau
// TypeInfer.h
// TODO move these
using PrintLineProc = void(*)(const std::string&);
using PrintLineProc = void (*)(const std::string&);
extern PrintLineProc luauPrintLine;
/* Push a scope onto the end of a stack for the lifetime of the StackPusher instance.
@ -127,7 +127,8 @@ struct TypeChecker2
if (auto ann = ref->parameters.data[0].type)
{
TypeId argTy = lookupAnnotation(ref->parameters.data[0].type);
luauPrintLine(format("_luau_print (%d, %d): %s\n", annotation->location.begin.line, annotation->location.begin.column, toString(argTy).c_str()));
luauPrintLine(format(
"_luau_print (%d, %d): %s\n", annotation->location.begin.line, annotation->location.begin.column, toString(argTy).c_str()));
return follow(argTy);
}
}
@ -409,8 +410,8 @@ struct TypeChecker2
}
TypeId iteratorTy = follow(iteratorTypes[0]);
auto checkFunction = [this, &arena, &scope, &forInStatement, &variableTypes](const FunctionTypeVar* iterFtv, std::vector<TypeId> iterTys, bool isMm)
{
auto checkFunction = [this, &arena, &scope, &forInStatement, &variableTypes](
const FunctionTypeVar* iterFtv, std::vector<TypeId> iterTys, bool isMm) {
if (iterTys.size() < 1 || iterTys.size() > 3)
{
if (isMm)
@ -420,20 +421,21 @@ struct TypeChecker2
return;
}
// It is okay if there aren't enough iterators, but the iteratee must provide enough.
std::vector<TypeId> expectedVariableTypes = flatten(arena, singletonTypes, iterFtv->retTypes, variableTypes.size());
if (expectedVariableTypes.size() < variableTypes.size())
{
if (isMm)
reportError(GenericError{"__iter metamethod's next() function does not return enough values"}, getLocation(forInStatement->values));
reportError(
GenericError{"__iter metamethod's next() function does not return enough values"}, getLocation(forInStatement->values));
else
reportError(GenericError{"next() does not return enough values"}, forInStatement->values.data[0]->location);
}
for (size_t i = 0; i < std::min(expectedVariableTypes.size(), variableTypes.size()); ++i)
reportErrors(tryUnify(scope, forInStatement->vars.data[i]->location, variableTypes[i], expectedVariableTypes[i]));
// nextFn is going to be invoked with (arrayTy, startIndexTy)
// It will be passed two arguments on every iteration save the
@ -509,7 +511,8 @@ struct TypeChecker2
{
// nothing
}
else if (std::optional<TypeId> iterMmTy = findMetatableEntry(singletonTypes, module->errors, iteratorTy, "__iter", forInStatement->values.data[0]->location))
else if (std::optional<TypeId> iterMmTy =
findMetatableEntry(singletonTypes, module->errors, iteratorTy, "__iter", forInStatement->values.data[0]->location))
{
Instantiation instantiation{TxnLog::empty(), &arena, TypeLevel{}, scope};
@ -554,7 +557,7 @@ struct TypeChecker2
// TODO: This will not tell the user that this is because the
// metamethod isn't callable. This is not ideal, and we should
// improve this error message.
// TODO: This will also not handle intersections of functions or
// callable tables (which are supported by the runtime).
reportError(CannotCallNonFunction{*iterMmTy}, forInStatement->values.data[0]->location);


@ -33,15 +33,11 @@ LUAU_FASTINTVARIABLE(LuauVisitRecursionLimit, 500)
LUAU_FASTFLAG(LuauKnowsTheDataModel3)
LUAU_FASTFLAG(LuauAutocompleteDynamicLimits)
LUAU_FASTFLAG(LuauTypeNormalization2)
LUAU_FASTFLAGVARIABLE(LuauFunctionArgMismatchDetails, false)
LUAU_FASTFLAGVARIABLE(LuauLowerBoundsCalculation, false)
LUAU_FASTFLAGVARIABLE(DebugLuauFreezeDuringUnification, false)
LUAU_FASTFLAGVARIABLE(LuauReturnAnyInsteadOfICE, false) // Eventually removed as false.
LUAU_FASTFLAGVARIABLE(DebugLuauSharedSelf, false)
LUAU_FASTFLAGVARIABLE(LuauAnyifyModuleReturnGenerics, false)
LUAU_FASTFLAGVARIABLE(LuauUnknownAndNeverType, false)
LUAU_FASTFLAGVARIABLE(LuauCallUnifyPackTails, false)
LUAU_FASTFLAGVARIABLE(LuauCheckGenericHOFTypes, false)
LUAU_FASTFLAGVARIABLE(LuauBinaryNeedsExpectedTypesToo, false)
LUAU_FASTFLAGVARIABLE(LuauFixVarargExprHeadType, false)
LUAU_FASTFLAGVARIABLE(LuauNeverTypesAndOperatorsInference, false)
@ -136,34 +132,6 @@ bool hasBreak(AstStat* node)
}
}
static bool hasReturn(const AstStat* node)
{
struct Searcher : AstVisitor
{
bool result = false;
bool visit(AstStat*) override
{
return !result; // if we've already found a return statement, don't bother to traverse inward anymore
}
bool visit(AstStatReturn*) override
{
result = true;
return false;
}
bool visit(AstExprFunction*) override
{
return false; // We don't care if the function uses a lambda that itself returns
}
};
Searcher searcher;
const_cast<AstStat*>(node)->visit(&searcher);
return searcher.result;
}
// returns the last statement before the block exits, or nullptr if the block never exits
const AstStat* getFallthrough(const AstStat* node)
{
@ -550,16 +518,6 @@ void TypeChecker::checkBlockWithoutRecursionCheck(const ScopePtr& scope, const A
std::unordered_map<AstStat*, std::pair<TypeId, ScopePtr>> functionDecls;
auto isLocalLambda = [](AstStat* stat) -> AstStatLocal* {
AstStatLocal* local = stat->as<AstStatLocal>();
if (FFlag::LuauLowerBoundsCalculation && local && local->vars.size == 1 && local->values.size == 1 &&
local->values.data[0]->is<AstExprFunction>())
return local;
else
return nullptr;
};
auto checkBody = [&](AstStat* stat) {
if (auto fun = stat->as<AstStatFunction>())
{
@ -607,7 +565,7 @@ void TypeChecker::checkBlockWithoutRecursionCheck(const ScopePtr& scope, const A
// function f<a>(x:a):a local x: number = g(37) return x end
// function g(x:number):number return f(x) end
// ```
if (containsFunctionCallOrReturn(**protoIter) || (FFlag::LuauLowerBoundsCalculation && isLocalLambda(*protoIter)))
if (containsFunctionCallOrReturn(**protoIter))
{
while (checkIter != protoIter)
{
@ -906,12 +864,6 @@ void TypeChecker::check(const ScopePtr& scope, const AstStatReturn& return_)
TypePackId retPack = checkExprList(scope, return_.location, return_.list, false, {}, expectedTypes).type;
if (useConstrainedIntersections())
{
unifyLowerBound(retPack, scope->returnType, demoter.demotedLevel(scope->level), scope, return_.location);
return;
}
// HACK: Nonstrict mode gets a bit too smart and strict for us when we
// start typechecking everything across module boundaries.
if (isNonstrictMode() && follow(scope->returnType) == follow(currentModule->getModuleScope()->returnType))
@ -1574,11 +1526,7 @@ void TypeChecker::check(const ScopePtr& scope, const AstStatTypeAlias& typealias
for (auto param : binding->typePackParams)
clone.instantiatedTypePackParams.push_back(param.tp);
bool isNormal = ty->normal;
ty = addType(std::move(clone));
if (FFlag::LuauLowerBoundsCalculation)
asMutable(ty)->normal = isNormal;
}
}
else
@ -1605,14 +1553,6 @@ void TypeChecker::check(const ScopePtr& scope, const AstStatTypeAlias& typealias
if (unify(ty, bindingType, aliasScope, typealias.location))
bindingType = ty;
if (FFlag::LuauLowerBoundsCalculation)
{
auto [t, ok] = normalize(bindingType, currentModule, singletonTypes, *iceHandler);
bindingType = t;
if (!ok)
reportError(typealias.location, NormalizationTooComplex{});
}
}
void TypeChecker::prototype(const ScopePtr& scope, const AstStatTypeAlias& typealias, int subLevel)
@ -1959,9 +1899,8 @@ WithPredicate<TypeId> TypeChecker::checkExpr(const ScopePtr& scope, const AstExp
}
else if (const FreeTypePack* ftp = get<Unifiable::Free>(retPack))
{
TypeLevel level = FFlag::LuauLowerBoundsCalculation ? ftp->level : scope->level;
TypeId head = freshType(level);
TypePackId pack = addTypePack(TypePackVar{TypePack{{head}, freshTypePack(level)}});
TypeId head = freshType(scope->level);
TypePackId pack = addTypePack(TypePackVar{TypePack{{head}, freshTypePack(scope->level)}});
unify(pack, retPack, scope, expr.location);
return {head, std::move(result.predicates)};
}
@ -2111,27 +2050,14 @@ std::optional<TypeId> TypeChecker::getIndexTypeFromTypeImpl(
return std::nullopt;
}
if (FFlag::LuauLowerBoundsCalculation)
{
// FIXME Inefficient. We craft a UnionTypeVar and immediately throw it away.
auto [t, ok] = normalize(addType(UnionTypeVar{std::move(goodOptions)}), currentModule, singletonTypes, *iceHandler);
std::vector<TypeId> result = reduceUnion(goodOptions);
if (FFlag::LuauUnknownAndNeverType && result.empty())
return neverType;
if (!ok)
reportError(location, NormalizationTooComplex{});
if (result.size() == 1)
return result[0];
return t;
}
else
{
std::vector<TypeId> result = reduceUnion(goodOptions);
if (FFlag::LuauUnknownAndNeverType && result.empty())
return neverType;
if (result.size() == 1)
return result[0];
return addType(UnionTypeVar{std::move(result)});
}
return addType(UnionTypeVar{std::move(result)});
}
else if (const IntersectionTypeVar* itv = get<IntersectionTypeVar>(type))
{
@ -3426,13 +3352,6 @@ std::pair<TypeId, ScopePtr> TypeChecker::checkFunctionSignature(const ScopePtr&
}
}
}
if (!FFlag::LuauCheckGenericHOFTypes)
{
// We do not infer type binders, so if a generic function is required we do not propagate
if (expectedFunctionType && !(expectedFunctionType->generics.empty() && expectedFunctionType->genericPacks.empty()))
expectedFunctionType = nullptr;
}
}
auto [generics, genericPacks] = createGenericTypes(funScope, std::nullopt, expr, expr.generics, expr.genericPacks);
@ -3442,8 +3361,7 @@ std::pair<TypeId, ScopePtr> TypeChecker::checkFunctionSignature(const ScopePtr&
retPack = resolveTypePack(funScope, *expr.returnAnnotation);
else if (isNonstrictMode())
retPack = anyTypePack;
else if (expectedFunctionType &&
(!FFlag::LuauCheckGenericHOFTypes || (expectedFunctionType->generics.empty() && expectedFunctionType->genericPacks.empty())))
else if (expectedFunctionType && expectedFunctionType->generics.empty() && expectedFunctionType->genericPacks.empty())
{
auto [head, tail] = flatten(expectedFunctionType->retTypes);
@ -3488,10 +3406,6 @@ std::pair<TypeId, ScopePtr> TypeChecker::checkFunctionSignature(const ScopePtr&
funScope->varargPack = anyTypePack;
}
}
else if (FFlag::LuauLowerBoundsCalculation && !isNonstrictMode())
{
funScope->varargPack = addTypePack(TypePackVar{VariadicTypePack{anyType, /*hidden*/ true}});
}
std::vector<TypeId> argTypes;
@ -3575,48 +3489,28 @@ std::pair<TypeId, ScopePtr> TypeChecker::checkFunctionSignature(const ScopePtr&
std::vector<TypeId> genericTys;
// if we have a generic expected function type and no generics, we should use the expected ones.
if (FFlag::LuauCheckGenericHOFTypes)
if (expectedFunctionType && generics.empty())
{
if (expectedFunctionType && generics.empty())
{
genericTys = expectedFunctionType->generics;
}
else
{
genericTys.reserve(generics.size());
for (const GenericTypeDefinition& generic : generics)
genericTys.push_back(generic.ty);
}
genericTys = expectedFunctionType->generics;
}
else
{
genericTys.reserve(generics.size());
std::transform(generics.begin(), generics.end(), std::back_inserter(genericTys), [](auto&& el) {
return el.ty;
});
for (const GenericTypeDefinition& generic : generics)
genericTys.push_back(generic.ty);
}
std::vector<TypePackId> genericTps;
// if we have a generic expected function type and no generic typepacks, we should use the expected ones.
if (FFlag::LuauCheckGenericHOFTypes)
if (expectedFunctionType && genericPacks.empty())
{
if (expectedFunctionType && genericPacks.empty())
{
genericTps = expectedFunctionType->genericPacks;
}
else
{
genericTps.reserve(genericPacks.size());
for (const GenericTypePackDefinition& generic : genericPacks)
genericTps.push_back(generic.tp);
}
genericTps = expectedFunctionType->genericPacks;
}
else
{
genericTps.reserve(genericPacks.size());
std::transform(genericPacks.begin(), genericPacks.end(), std::back_inserter(genericTps), [](auto&& el) {
return el.tp;
});
for (const GenericTypePackDefinition& generic : genericPacks)
genericTps.push_back(generic.tp);
}
TypeId funTy =
@ -3674,24 +3568,9 @@ void TypeChecker::checkFunctionBody(const ScopePtr& scope, TypeId ty, const AstE
{
check(scope, *function.body);
if (useConstrainedIntersections())
{
TypePackId retPack = follow(funTy->retTypes);
// It is possible for a function to have no annotation and no return statement, and yet still have an ascribed return type
// if it is expected to conform to some other interface. (eg the function may be a lambda passed as a callback)
if (!hasReturn(function.body) && !function.returnAnnotation.has_value() && get<FreeTypePack>(retPack))
{
auto level = getLevel(retPack);
if (level && scope->level.subsumes(*level))
*asMutable(retPack) = TypePack{{}, std::nullopt};
}
}
else
{
// We explicitly don't follow here to check if we have a 'true' free type instead of bound one
if (get_if<FreeTypePack>(&funTy->retTypes->ty))
*asMutable(funTy->retTypes) = TypePack{{}, std::nullopt};
}
// We explicitly don't follow here to check if we have a 'true' free type instead of bound one
if (get_if<FreeTypePack>(&funTy->retTypes->ty))
*asMutable(funTy->retTypes) = TypePack{{}, std::nullopt};
bool reachesImplicitReturn = getFallthrough(function.body) != nullptr;
@ -3763,21 +3642,13 @@ void TypeChecker::checkArgumentList(const ScopePtr& scope, const AstExpr& funNam
if (!argLocations.empty())
location = {state.location.begin, argLocations.back().end};
if (FFlag::LuauFunctionArgMismatchDetails)
{
std::string namePath;
if (std::optional<LValue> lValue = tryGetLValue(funName))
namePath = toString(*lValue);
std::string namePath;
if (std::optional<LValue> lValue = tryGetLValue(funName))
namePath = toString(*lValue);
auto [minParams, optMaxParams] = getParameterExtents(&state.log, paramPack);
state.reportError(TypeError{location,
CountMismatch{minParams, optMaxParams, std::distance(begin(argPack), end(argPack)), CountMismatch::Context::Arg, false, namePath}});
}
else
{
size_t minParams = getParameterExtents(&state.log, paramPack).first;
state.reportError(TypeError{location, CountMismatch{minParams, std::nullopt, std::distance(begin(argPack), end(argPack))}});
}
auto [minParams, optMaxParams] = getParameterExtents(&state.log, paramPack);
state.reportError(TypeError{location,
CountMismatch{minParams, optMaxParams, std::distance(begin(argPack), end(argPack)), CountMismatch::Context::Arg, false, namePath}});
};
while (true)
@ -3801,7 +3672,7 @@ void TypeChecker::checkArgumentList(const ScopePtr& scope, const AstExpr& funNam
else
state.log.replace(*argTail, TypePackVar(TypePack{{}}));
}
else if (FFlag::LuauCallUnifyPackTails && paramTail)
else if (paramTail)
{
state.tryUnify(*argTail, *paramTail);
}
@ -3881,20 +3752,12 @@ void TypeChecker::checkArgumentList(const ScopePtr& scope, const AstExpr& funNam
std::optional<TypePackId> tail = flatten(paramPack, state.log).second;
bool isVariadic = tail && Luau::isVariadic(*tail);
if (FFlag::LuauFunctionArgMismatchDetails)
{
std::string namePath;
if (std::optional<LValue> lValue = tryGetLValue(funName))
namePath = toString(*lValue);
std::string namePath;
if (std::optional<LValue> lValue = tryGetLValue(funName))
namePath = toString(*lValue);
state.reportError(TypeError{
state.location, CountMismatch{minParams, optMaxParams, paramIndex, CountMismatch::Context::Arg, isVariadic, namePath}});
}
else
{
state.reportError(
TypeError{state.location, CountMismatch{minParams, std::nullopt, paramIndex, CountMismatch::Context::Arg, isVariadic}});
}
state.reportError(TypeError{
state.location, CountMismatch{minParams, optMaxParams, paramIndex, CountMismatch::Context::Arg, isVariadic, namePath}});
return;
}
++paramIter;
@ -3924,21 +3787,6 @@ void TypeChecker::checkArgumentList(const ScopePtr& scope, const AstExpr& funNam
}
else if (auto vtp = state.log.getMutable<VariadicTypePack>(tail))
{
if (FFlag::LuauLowerBoundsCalculation && vtp->hidden)
{
// We know that this function can technically be oversaturated, but we have its definition and we
// know that it's useless.
TypeId e = errorRecoveryType(scope);
while (argIter != endIter)
{
unify(e, *argIter, scope, state.location);
++argIter;
}
reportCountMismatchError();
return;
}
// Function is variadic and requires that all subsequent parameters
// be compatible with a type.
size_t argIndex = paramIndex;
@ -4040,21 +3888,14 @@ WithPredicate<TypePackId> TypeChecker::checkExprPackHelper(const ScopePtr& scope
}
TypePackId retPack;
if (FFlag::LuauLowerBoundsCalculation)
if (auto free = get<FreeTypeVar>(actualFunctionType))
{
retPack = freshTypePack(scope->level);
retPack = freshTypePack(free->level);
TypePackId freshArgPack = freshTypePack(free->level);
asMutable(actualFunctionType)->ty.emplace<FunctionTypeVar>(free->level, freshArgPack, retPack);
}
else
{
if (auto free = get<FreeTypeVar>(actualFunctionType))
{
retPack = freshTypePack(free->level);
TypePackId freshArgPack = freshTypePack(free->level);
asMutable(actualFunctionType)->ty.emplace<FunctionTypeVar>(free->level, freshArgPack, retPack);
}
else
retPack = freshTypePack(scope->level);
}
retPack = freshTypePack(scope->level);
// checkExpr will log the pre-instantiated type of the function.
// That's not nearly as interesting as the instantiated type, which will include details about how
@ -4214,39 +4055,13 @@ std::optional<WithPredicate<TypePackId>> TypeChecker::checkCallOverload(const Sc
// fn is one of the overloads of actualFunctionType, which
// has been instantiated, so is a monotype. We can therefore
// unify it with a monomorphic function.
if (useConstrainedIntersections())
{
// This ternary is phrased deliberately. We need ties between sibling scopes to bias toward ftv->level.
const TypeLevel level = scope->level.subsumes(ftv->level) ? scope->level : ftv->level;
TypeId r = addType(FunctionTypeVar(scope->level, argPack, retPack));
std::vector<TypeId> adjustedArgTypes;
auto it = begin(argPack);
auto endIt = end(argPack);
Widen widen{&currentModule->internalTypes, singletonTypes};
for (; it != endIt; ++it)
{
adjustedArgTypes.push_back(addType(ConstrainedTypeVar{level, {widen(*it)}}));
}
UnifierOptions options;
options.isFunctionCall = true;
unify(r, fn, scope, expr.location, options);
TypePackId adjustedArgPack = addTypePack(TypePack{std::move(adjustedArgTypes), it.tail()});
TxnLog log;
promoteTypeLevels(log, &currentModule->internalTypes, level, /*scope*/ nullptr, /*useScope*/ false, retPack);
log.commit();
*asMutable(fn) = FunctionTypeVar{level, adjustedArgPack, retPack};
return {{retPack}};
}
else
{
TypeId r = addType(FunctionTypeVar(scope->level, argPack, retPack));
UnifierOptions options;
options.isFunctionCall = true;
unify(r, fn, scope, expr.location, options);
return {{retPack}};
}
return {{retPack}};
}
std::vector<Location> metaArgLocations;
@ -4760,14 +4575,6 @@ TypeId TypeChecker::quantify(const ScopePtr& scope, TypeId ty, Location location
Luau::quantify(ty, scope->level);
else if (auto ttv = getTableType(ty); ttv && ttv->selfTy)
Luau::quantify(ty, scope->level);
if (FFlag::LuauLowerBoundsCalculation)
{
auto [t, ok] = Luau::normalize(ty, currentModule, singletonTypes, *iceHandler);
if (!ok)
reportError(location, NormalizationTooComplex{});
return t;
}
}
else
{
@ -4775,14 +4582,6 @@ TypeId TypeChecker::quantify(const ScopePtr& scope, TypeId ty, Location location
if (ftv)
Luau::quantify(ty, scope->level);
if (FFlag::LuauLowerBoundsCalculation && ftv)
{
auto [t, ok] = Luau::normalize(ty, currentModule, singletonTypes, *iceHandler);
if (!ok)
reportError(location, NormalizationTooComplex{});
return t;
}
}
return ty;
@ -4813,14 +4612,6 @@ TypeId TypeChecker::instantiate(const ScopePtr& scope, TypeId ty, Location locat
TypeId TypeChecker::anyify(const ScopePtr& scope, TypeId ty, Location location)
{
if (FFlag::LuauLowerBoundsCalculation)
{
auto [t, ok] = normalize(ty, currentModule, singletonTypes, *iceHandler);
if (!ok)
reportError(location, NormalizationTooComplex{});
ty = t;
}
Anyification anyification{&currentModule->internalTypes, scope, singletonTypes, iceHandler, anyType, anyTypePack};
std::optional<TypeId> any = anyification.substitute(ty);
if (anyification.normalizationTooComplex)
@ -4836,14 +4627,6 @@ TypeId TypeChecker::anyify(const ScopePtr& scope, TypeId ty, Location location)
TypePackId TypeChecker::anyify(const ScopePtr& scope, TypePackId ty, Location location)
{
if (FFlag::LuauLowerBoundsCalculation)
{
auto [t, ok] = normalize(ty, currentModule, singletonTypes, *iceHandler);
if (!ok)
reportError(location, NormalizationTooComplex{});
ty = t;
}
Anyification anyification{&currentModule->internalTypes, scope, singletonTypes, iceHandler, anyType, anyTypePack};
std::optional<TypePackId> any = anyification.substitute(ty);
if (any.has_value())
@ -6083,11 +5866,6 @@ bool TypeChecker::isNonstrictMode() const
return (currentModule->mode == Mode::Nonstrict) || (currentModule->mode == Mode::NoCheck);
}
bool TypeChecker::useConstrainedIntersections() const
{
return FFlag::LuauLowerBoundsCalculation && !isNonstrictMode();
}
std::vector<TypeId> TypeChecker::unTypePack(const ScopePtr& scope, TypePackId tp, size_t expectedLength, const Location& location)
{
TypePackId expectedTypePack = addTypePack({});


@ -6,8 +6,6 @@
#include "Luau/ToString.h"
#include "Luau/TypeInfer.h"
LUAU_FASTFLAG(LuauFunctionArgMismatchDetails)
namespace Luau
{
@ -218,7 +216,7 @@ std::pair<size_t, std::optional<size_t>> getParameterExtents(const TxnLog* log,
++it;
}
if (it.tail() && (!FFlag::LuauFunctionArgMismatchDetails || isVariadicTail(*it.tail(), *log, includeHiddenVariadics)))
if (it.tail() && isVariadicTail(*it.tail(), *log, includeHiddenVariadics))
return {minCount, std::nullopt};
else
return {minCount, minCount + optionalCount};


@ -25,7 +25,6 @@ LUAU_FASTINTVARIABLE(LuauTableTypeMaximumStringifierLength, 0)
LUAU_FASTINT(LuauTypeInferRecursionLimit)
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAGVARIABLE(LuauMaybeGenericIntersectionTypes, false)
LUAU_FASTFLAGVARIABLE(LuauStringFormatArgumentErrorFix, false)
LUAU_FASTFLAGVARIABLE(LuauNoMoreGlobalSingletonTypes, false)
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
@ -1166,21 +1165,12 @@ std::optional<WithPredicate<TypePackId>> magicFunctionFormat(
}
// if we know the argument count or if we have too many arguments for sure, we can issue an error
if (FFlag::LuauStringFormatArgumentErrorFix)
{
size_t numActualParams = params.size();
size_t numExpectedParams = expected.size() + 1; // + 1 for the format string
size_t numActualParams = params.size();
size_t numExpectedParams = expected.size() + 1; // + 1 for the format string
if (numExpectedParams != numActualParams && (!tail || numExpectedParams < numActualParams))
typechecker.reportError(TypeError{expr.location, CountMismatch{numExpectedParams, std::nullopt, numActualParams}});
}
else
{
size_t actualParamSize = params.size() - paramOffset;
if (numExpectedParams != numActualParams && (!tail || numExpectedParams < numActualParams))
typechecker.reportError(TypeError{expr.location, CountMismatch{numExpectedParams, std::nullopt, numActualParams}});
if (expected.size() != actualParamSize && (!tail || expected.size() < actualParamSize))
typechecker.reportError(TypeError{expr.location, CountMismatch{expected.size(), std::nullopt, actualParamSize}});
}
return WithPredicate<TypePackId>{arena.addTypePack({typechecker.stringType})};
}


@ -18,14 +18,12 @@ LUAU_FASTINT(LuauTypeInferTypePackLoopLimit);
LUAU_FASTINT(LuauTypeInferIterationLimit);
LUAU_FASTFLAG(LuauAutocompleteDynamicLimits)
LUAU_FASTINTVARIABLE(LuauTypeInferLowerBoundsIterationLimit, 2000);
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(LuauErrorRecoveryType);
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAGVARIABLE(LuauSubtypeNormalizer, false);
LUAU_FASTFLAGVARIABLE(LuauScalarShapeSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauInstantiateInSubtyping, false)
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
LUAU_FASTFLAG(LuauCallUnifyPackTails)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
namespace Luau
@ -346,8 +344,7 @@ static bool subsumes(bool useScopes, TY_A* left, TY_B* right)
return left->level.subsumes(right->level);
}
Unifier::Unifier(NotNull<Normalizer> normalizer, Mode mode, NotNull<Scope> scope, const Location& location,
Variance variance, TxnLog* parentLog)
Unifier::Unifier(NotNull<Normalizer> normalizer, Mode mode, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog)
: types(normalizer->arena)
, singletonTypes(normalizer->singletonTypes)
, normalizer(normalizer)
@ -529,7 +526,7 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
{
tryUnifyUnionWithType(subTy, subUnion, superTy);
}
else if (const UnionTypeVar* uv = (FFlag::LuauSubtypeNormalizer? nullptr: log.getMutable<UnionTypeVar>(superTy)))
else if (const UnionTypeVar* uv = (FFlag::LuauSubtypeNormalizer ? nullptr : log.getMutable<UnionTypeVar>(superTy)))
{
tryUnifyTypeWithUnion(subTy, superTy, uv, cacheEnabled, isFunctionCall);
}
@ -865,7 +862,8 @@ void Unifier::tryUnifyIntersectionWithType(TypeId subTy, const IntersectionTypeV
}
}
void Unifier::tryUnifyNormalizedTypes(TypeId subTy, TypeId superTy, const NormalizedType& subNorm, const NormalizedType& superNorm, std::string reason, std::optional<TypeError> error)
void Unifier::tryUnifyNormalizedTypes(
TypeId subTy, TypeId superTy, const NormalizedType& subNorm, const NormalizedType& superNorm, std::string reason, std::optional<TypeError> error)
{
LUAU_ASSERT(FFlag::LuauSubtypeNormalizer);
@ -1371,12 +1369,12 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
else
{
// A union type including nil marks an optional argument
if ((!FFlag::LuauLowerBoundsCalculation || isNonstrictMode()) && superIter.good() && isOptional(*superIter))
if (superIter.good() && isOptional(*superIter))
{
superIter.advance();
continue;
}
else if ((!FFlag::LuauLowerBoundsCalculation || isNonstrictMode()) && subIter.good() && isOptional(*subIter))
else if (subIter.good() && isOptional(*subIter))
{
subIter.advance();
continue;
@ -1394,7 +1392,7 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
return;
}
if ((!FFlag::LuauLowerBoundsCalculation || isNonstrictMode()) && !isFunctionCall && subIter.good())
if (!isFunctionCall && subIter.good())
{
// Sometimes it is ok to pass too many arguments
return;
@ -1491,7 +1489,6 @@ void Unifier::tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCal
numGenerics = std::min(superFunction->generics.size(), subFunction->generics.size());
numGenericPacks = std::min(superFunction->genericPacks.size(), subFunction->genericPacks.size());
}
else
{
@ -2012,7 +2009,8 @@ void Unifier::tryUnifyWithMetatable(TypeId subTy, TypeId superTy, bool reversed)
if (auto e = hasUnificationTooComplex(innerState.errors))
reportError(*e);
else if (!innerState.errors.empty())
reportError(TypeError{location, TypeMismatch{reversed ? subTy : superTy, reversed ? superTy : subTy, "", innerState.errors.front()}});
reportError(
TypeError{location, TypeMismatch{reversed ? subTy : superTy, reversed ? superTy : subTy, "", innerState.errors.front()}});
else if (!missingProperty)
{
log.concat(std::move(innerState.log));
@ -2448,8 +2446,7 @@ void Unifier::unifyLowerBound(TypePackId subTy, TypePackId superTy, TypeLevel de
for (; superIter != superEndIter; ++superIter)
tp->head.push_back(*superIter);
}
else if (const VariadicTypePack* subVariadic = log.getMutable<VariadicTypePack>(subTailPack);
subVariadic && FFlag::LuauCallUnifyPackTails)
else if (const VariadicTypePack* subVariadic = log.getMutable<VariadicTypePack>(subTailPack))
{
while (superIter != superEndIter)
{


@ -1,17 +1,13 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <vector>
#include <string>
#include <stdarg.h>
#if defined(__GNUC__)
#define LUAU_PRINTF_ATTR(fmt, arg) __attribute__((format(printf, fmt, arg)))
#else
#define LUAU_PRINTF_ATTR(fmt, arg)
#endif
namespace Luau
{


@ -4,6 +4,7 @@
#include "lua.h"
#include "lualib.h"
#include "Luau/CodeGen.h"
#include "Luau/Compiler.h"
#include "Luau/BytecodeBuilder.h"
#include "Luau/Parser.h"
@ -46,11 +47,15 @@ enum class CompileFormat
{
Text,
Binary,
Remarks,
Codegen,
Null
};
constexpr int MaxTraversalLimit = 50;
static bool codegen = false;
// Ctrl-C handling
static void sigintCallback(lua_State* L, int gc)
{
@ -159,6 +164,9 @@ static int lua_require(lua_State* L)
std::string bytecode = Luau::compile(*source, copts());
if (luau_load(ML, chunkname.c_str(), bytecode.data(), bytecode.size(), 0) == 0)
{
if (codegen)
Luau::CodeGen::compile(ML, -1);
if (coverageActive())
coverageTrack(ML, -1);
@ -242,6 +250,9 @@ static int lua_callgrind(lua_State* L)
void setupState(lua_State* L)
{
if (codegen)
Luau::CodeGen::create(L);
luaL_openlibs(L);
static const luaL_Reg funcs[] = {
@ -276,6 +287,9 @@ std::string runCode(lua_State* L, const std::string& source)
return error;
}
if (codegen)
Luau::CodeGen::compile(L, -1);
lua_State* T = lua_newthread(L);
lua_pushvalue(L, -2);
@ -604,6 +618,9 @@ static bool runFile(const char* name, lua_State* GL, bool repl)
if (luau_load(L, chunkname.c_str(), bytecode.data(), bytecode.size(), 0) == 0)
{
if (codegen)
Luau::CodeGen::compile(L, -1);
if (coverageActive())
coverageTrack(L, -1);
@ -656,6 +673,20 @@ static void reportError(const char* name, const Luau::CompileError& error)
report(name, error.getLocation(), "CompileError", error.what());
}
static std::string getCodegenAssembly(const char* name, const std::string& bytecode)
{
std::unique_ptr<lua_State, void (*)(lua_State*)> globalState(luaL_newstate(), lua_close);
lua_State* L = globalState.get();
setupState(L);
if (luau_load(L, name, bytecode.data(), bytecode.size(), 0) == 0)
return Luau::CodeGen::getAssemblyText(L, -1);
fprintf(stderr, "Error loading bytecode %s\n", name);
return "";
}
static bool compileFile(const char* name, CompileFormat format)
{
std::optional<std::string> source = readFile(name);
@ -675,6 +706,11 @@ static bool compileFile(const char* name, CompileFormat format)
Luau::BytecodeBuilder::Dump_Remarks);
bcb.setDumpSource(*source);
}
else if (format == CompileFormat::Remarks)
{
bcb.setDumpFlags(Luau::BytecodeBuilder::Dump_Source | Luau::BytecodeBuilder::Dump_Remarks);
bcb.setDumpSource(*source);
}
Luau::compileOrThrow(bcb, *source, copts());
@ -683,9 +719,15 @@ static bool compileFile(const char* name, CompileFormat format)
case CompileFormat::Text:
printf("%s", bcb.dumpEverything().c_str());
break;
case CompileFormat::Remarks:
printf("%s", bcb.dumpSourceRemarks().c_str());
break;
case CompileFormat::Binary:
fwrite(bcb.getBytecode().data(), 1, bcb.getBytecode().size(), stdout);
break;
case CompileFormat::Codegen:
printf("%s", getCodegenAssembly(name, bcb.getBytecode()).c_str());
break;
case CompileFormat::Null:
break;
}
@ -713,7 +755,7 @@ static void displayHelp(const char* argv0)
printf("\n");
printf("Available modes:\n");
printf(" omitted: compile and run input files one by one\n");
printf(" --compile[=format]: compile input files and output resulting formatted bytecode (binary or text)\n");
printf(" --compile[=format]: compile input files and output resulting formatted bytecode (binary, text, remarks, codegen or null)\n");
printf("\n");
printf("Available options:\n");
printf(" --coverage: collect code coverage while running the code and output results to coverage.out\n");
@ -723,6 +765,7 @@ static void displayHelp(const char* argv0)
printf(" -g<n>: compile with debug level n (default 1, n should be between 0 and 2).\n");
printf(" --profile[=N]: profile the code using N Hz sampling (default 10000) and output results to profile.out\n");
printf(" --timetrace: record compiler time tracing information into trace.json\n");
printf(" --codegen: execute code using native code generation\n");
}
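As a usage illustration (script.luau is a placeholder file name, and the REPL binary is referred to as luau here), the new modes are driven as follows:

    luau --compile=remarks script.luau
    luau --compile=codegen script.luau
    luau --codegen script.luau

The first prints the source with optimization remarks embedded as comments, the second prints the generated x64 assembly, and the third runs the script through native code generation (only available in builds with LUA_CUSTOM_EXECUTION enabled, see the CMake change further down).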
static int assertionHandler(const char* expr, const char* file, int line, const char* function)
@ -761,6 +804,14 @@ int replMain(int argc, char** argv)
{
compileFormat = CompileFormat::Text;
}
else if (strcmp(argv[1], "--compile=remarks") == 0)
{
compileFormat = CompileFormat::Remarks;
}
else if (strcmp(argv[1], "--compile=codegen") == 0)
{
compileFormat = CompileFormat::Codegen;
}
else if (strcmp(argv[1], "--compile=null") == 0)
{
compileFormat = CompileFormat::Null;
@ -811,6 +862,10 @@ int replMain(int argc, char** argv)
{
profile = atoi(argv[i] + 10);
}
else if (strcmp(argv[i], "--codegen") == 0)
{
codegen = true;
}
else if (strcmp(argv[i], "--coverage") == 0)
{
coverage = true;
@ -839,12 +894,26 @@ int replMain(int argc, char** argv)
}
#endif
#if !LUA_CUSTOM_EXECUTION
if (codegen)
{
fprintf(stderr, "To run with --codegen, Luau has to be built with LUA_CUSTOM_EXECUTION enabled\n");
return 1;
}
#endif
const std::vector<std::string> files = getSourceFiles(argc, argv);
if (mode == CliMode::Unknown)
{
mode = files.empty() ? CliMode::Repl : CliMode::RunSourceFiles;
}
if (mode != CliMode::Compile && codegen && !Luau::CodeGen::isSupported())
{
fprintf(stderr, "Cannot enable --codegen, native code generation is not supported in current configuration\n");
return 1;
}
switch (mode)
{
case CliMode::Compile:


@ -12,6 +12,7 @@ option(LUAU_BUILD_WEB "Build Web module" OFF)
option(LUAU_WERROR "Warnings as errors" OFF)
option(LUAU_STATIC_CRT "Link with the static CRT (/MT)" OFF)
option(LUAU_EXTERN_C "Use extern C for all APIs" OFF)
option(LUAU_NATIVE "Enable support for native code generation" OFF)
if(LUAU_STATIC_CRT)
cmake_minimum_required(VERSION 3.15)
@ -132,6 +133,10 @@ if(LUAU_EXTERN_C)
target_compile_definitions(Luau.Compiler PUBLIC LUACODE_API=extern\"C\")
endif()
if(LUAU_NATIVE)
target_compile_definitions(Luau.VM PUBLIC LUA_CUSTOM_EXECUTION=1)
endif()
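For anyone enabling this, a minimal out-of-source configure is sketched below (build directory and generator are up to the user):

    cmake -DLUAU_BUILD_CLI=ON -DLUAU_NATIVE=ON ..
    cmake --build . --target Luau.Repl.CLI

With LUAU_NATIVE=ON the VM is compiled with LUA_CUSTOM_EXECUTION=1, which is exactly the guard the REPL checks before allowing --codegen.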
if (MSVC AND MSVC_VERSION GREATER_EQUAL 1924)
# disable partial redundancy elimination which regresses interpreter codegen substantially in VS2022:
# https://developercommunity.visualstudio.com/t/performance-regression-on-a-complex-interpreter-lo/1631863
@ -167,7 +172,7 @@ if(LUAU_BUILD_CLI)
target_include_directories(Luau.Repl.CLI PRIVATE extern extern/isocline/include)
target_link_libraries(Luau.Repl.CLI PRIVATE Luau.Compiler Luau.VM isocline)
target_link_libraries(Luau.Repl.CLI PRIVATE Luau.Compiler Luau.CodeGen Luau.VM isocline)
if(UNIX)
find_library(LIBPTHREAD pthread)
@ -193,11 +198,11 @@ if(LUAU_BUILD_TESTS)
target_compile_options(Luau.Conformance PRIVATE ${LUAU_OPTIONS})
target_include_directories(Luau.Conformance PRIVATE extern)
target_link_libraries(Luau.Conformance PRIVATE Luau.Analysis Luau.Compiler Luau.VM)
target_link_libraries(Luau.Conformance PRIVATE Luau.Analysis Luau.Compiler Luau.CodeGen Luau.VM)
target_compile_options(Luau.CLI.Test PRIVATE ${LUAU_OPTIONS})
target_include_directories(Luau.CLI.Test PRIVATE extern CLI)
target_link_libraries(Luau.CLI.Test PRIVATE Luau.Compiler Luau.VM isocline)
target_link_libraries(Luau.CLI.Test PRIVATE Luau.Compiler Luau.CodeGen Luau.VM isocline)
if(UNIX)
find_library(LIBPTHREAD pthread)
if (LIBPTHREAD)


@ -15,6 +15,14 @@ namespace Luau
namespace CodeGen
{
enum class RoundingModeX64
{
RoundToNearestEven = 0b00,
RoundToNegativeInfinity = 0b01,
RoundToPositiveInfinity = 0b10,
RoundToZero = 0b11,
};
class AssemblyBuilderX64
{
public:
@ -48,6 +56,8 @@ public:
void imul(OperandX64 op);
void neg(OperandX64 op);
void not_(OperandX64 op);
void dec(OperandX64 op);
void inc(OperandX64 op);
// Additional forms of imul
void imul(OperandX64 lhs, OperandX64 rhs);
@ -82,13 +92,12 @@ public:
void vxorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vcomisd(OperandX64 src1, OperandX64 src2);
void vucomisd(OperandX64 src1, OperandX64 src2);
void vcvttsd2si(OperandX64 dst, OperandX64 src);
void vcvtsi2sd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vroundsd(OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t mode);
void vroundsd(OperandX64 dst, OperandX64 src1, OperandX64 src2, RoundingModeX64 roundingMode); // inexact
void vsqrtpd(OperandX64 dst, OperandX64 src);
void vsqrtps(OperandX64 dst, OperandX64 src);
@ -120,6 +129,8 @@ public:
OperandX64 f32x4(float x, float y, float z, float w);
OperandX64 bytes(const void* ptr, size_t size, size_t align = 8);
void logAppend(const char* fmt, ...) LUAU_PRINTF_ATTR(2, 3);
// Resulting data and code that need to be copied over one after the other
// The *end* of 'data' has to be aligned to 16 bytes, this will also align 'code'
std::vector<uint8_t> data;
@ -127,6 +138,8 @@ public:
std::string text;
const bool logText = false;
private:
// Instruction archetypes
void placeBinary(const char* name, OperandX64 lhs, OperandX64 rhs, uint8_t codeimm8, uint8_t codeimm, uint8_t codeimmImm8, uint8_t code8rev,
@ -178,7 +191,6 @@ private:
LUAU_NOINLINE void log(Label label);
LUAU_NOINLINE void log(const char* opcode, Label label);
void log(OperandX64 op);
void logAppend(const char* fmt, ...);
const char* getSizeName(SizeX64 size);
const char* getRegisterName(RegisterX64 reg);
@ -187,7 +199,6 @@ private:
std::vector<Label> pendingLabels;
std::vector<uint32_t> labelLocations;
bool logText = false;
bool finalized = false;
size_t dataPos = 0;


@ -2,8 +2,9 @@
#pragma once
#include <vector>
#include <stdint.h>
#include <stddef.h>
#include <stdint.h>
namespace Luau
{


@ -0,0 +1,24 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <string>
struct lua_State;
namespace Luau
{
namespace CodeGen
{
bool isSupported();
void create(lua_State* L);
// Builds target function and all inner functions
void compile(lua_State* L, int idx);
// Generates assembly text for target function and all inner functions
std::string getAssemblyText(lua_State* L, int idx);
} // namespace CodeGen
} // namespace Luau
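For orientation, here is a minimal host-side sketch of how these entry points combine with the existing compile/load API, mirroring the REPL changes above; error handling is omitted and the chunk name "=example" is arbitrary:

    #include "Luau/CodeGen.h"
    #include "Luau/Compiler.h"
    #include "lua.h"
    #include "lualib.h"

    #include <string>

    static void runNatively(const std::string& source)
    {
        lua_State* L = luaL_newstate();
        luaL_openlibs(L);

        // Install the native execution callbacks before any code runs
        if (Luau::CodeGen::isSupported())
            Luau::CodeGen::create(L);

        std::string bytecode = Luau::compile(source);

        if (luau_load(L, "=example", bytecode.data(), bytecode.size(), 0) == 0)
        {
            // Build native code for the loaded closure and all inner functions
            if (Luau::CodeGen::isSupported())
                Luau::CodeGen::compile(L, -1);

            lua_call(L, 0, 0);
        }

        lua_close(L);
    }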


@ -37,6 +37,9 @@ enum class Condition
Zero,
NotZero,
Parity,
NotParity,
Count
};


@ -14,9 +14,13 @@ namespace CodeGen
// TODO: more assertions on operand sizes
const uint8_t codeForCondition[] = {
0x0, 0x1, 0x2, 0x3, 0x2, 0x6, 0x7, 0x3, 0x4, 0xc, 0xe, 0xf, 0xd, 0x3, 0x7, 0x6, 0x2, 0x5, 0xd, 0xf, 0xe, 0xc, 0x4, 0x5};
0x0, 0x1, 0x2, 0x3, 0x2, 0x6, 0x7, 0x3, 0x4, 0xc, 0xe, 0xf, 0xd, 0x3, 0x7, 0x6, 0x2, 0x5, 0xd, 0xf, 0xe, 0xc, 0x4, 0x5, 0xa, 0xb};
static_assert(sizeof(codeForCondition) / sizeof(codeForCondition[0]) == size_t(Condition::Count), "all conditions have to be covered");
const char* textForCondition[] = {"jo", "jno", "jc", "jnc", "jb", "jbe", "ja", "jae", "je", "jl", "jle", "jg", "jge", "jnb", "jnbe", "jna", "jnae",
"jne", "jnl", "jnle", "jng", "jnge", "jz", "jnz", "jp", "jnp"};
static_assert(sizeof(textForCondition) / sizeof(textForCondition[0]) == size_t(Condition::Count), "all conditions have to be covered");
#define OP_PLUS_REG(op, reg) ((op) + (reg & 0x7))
#define OP_PLUS_CC(op, cc) ((op) + uint8_t(cc))
@ -48,6 +52,8 @@ const unsigned AVX_F2 = 0b11;
const unsigned kMaxAlign = 16;
const uint8_t kRoundingPrecisionInexact = 0b1000;
AssemblyBuilderX64::AssemblyBuilderX64(bool logText)
: logText(logText)
{
@ -255,6 +261,16 @@ void AssemblyBuilderX64::not_(OperandX64 op)
placeUnaryModRegMem("not", op, 0xf6, 0xf7, 2);
}
void AssemblyBuilderX64::dec(OperandX64 op)
{
placeUnaryModRegMem("dec", op, 0xfe, 0xff, 1);
}
void AssemblyBuilderX64::inc(OperandX64 op)
{
placeUnaryModRegMem("inc", op, 0xfe, 0xff, 0);
}
void AssemblyBuilderX64::imul(OperandX64 lhs, OperandX64 rhs)
{
if (logText)
@ -338,7 +354,7 @@ void AssemblyBuilderX64::ret()
void AssemblyBuilderX64::jcc(Condition cond, Label& label)
{
placeJcc("je", label, codeForCondition[size_t(cond)]);
placeJcc(textForCondition[size_t(cond)], label, codeForCondition[size_t(cond)]);
}
void AssemblyBuilderX64::jmp(Label& label)
@ -442,11 +458,6 @@ void AssemblyBuilderX64::vxorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2
placeAvx("vxorpd", dst, src1, src2, 0x57, false, AVX_0F, AVX_66);
}
void AssemblyBuilderX64::vcomisd(OperandX64 src1, OperandX64 src2)
{
placeAvx("vcomisd", src1, src2, 0x2f, false, AVX_0F, AVX_66);
}
void AssemblyBuilderX64::vucomisd(OperandX64 src1, OperandX64 src2)
{
placeAvx("vucomisd", src1, src2, 0x2e, false, AVX_0F, AVX_66);
@ -462,9 +473,9 @@ void AssemblyBuilderX64::vcvtsi2sd(OperandX64 dst, OperandX64 src1, OperandX64 s
placeAvx("vcvtsi2sd", dst, src1, src2, 0x2a, (src2.cat == CategoryX64::reg ? src2.base.size : src2.memSize) == SizeX64::dword, AVX_0F, AVX_F2);
}
void AssemblyBuilderX64::vroundsd(OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t mode)
void AssemblyBuilderX64::vroundsd(OperandX64 dst, OperandX64 src1, OperandX64 src2, RoundingModeX64 roundingMode)
{
placeAvx("vroundsd", dst, src1, src2, mode, 0x0b, false, AVX_0F3A, AVX_66);
placeAvx("vroundsd", dst, src1, src2, uint8_t(roundingMode) | kRoundingPrecisionInexact, 0x0b, false, AVX_0F3A, AVX_66);
}
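In other words, the builder now folds the inexact-precision bit into the immediate itself, so call sites only specify the rounding mode; the math.floor builtin further down, for instance, reduces to:

    build.vroundsd(xmm0, xmm0, luauRegValue(arg), RoundingModeX64::RoundToNegativeInfinity);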
void AssemblyBuilderX64::vsqrtpd(OperandX64 dst, OperandX64 src)
@ -534,6 +545,8 @@ void AssemblyBuilderX64::finalize()
// Resolve jump targets
for (Label fixup : pendingLabels)
{
// If this assertion fires, a label was used in jmp without calling setLabel
LUAU_ASSERT(labelLocations[fixup.id - 1] != ~0u);
uint32_t value = labelLocations[fixup.id - 1] - (fixup.location + 4);
writeu32(&code[fixup.location], value);
}
@ -552,7 +565,7 @@ void AssemblyBuilderX64::finalize()
Label AssemblyBuilderX64::setLabel()
{
Label label{nextLabel++, getCodeSize()};
labelLocations.push_back(0);
labelLocations.push_back(~0u);
if (logText)
log(label);
@ -565,7 +578,7 @@ void AssemblyBuilderX64::setLabel(Label& label)
if (label.id == 0)
{
label.id = nextLabel++;
labelLocations.push_back(0);
labelLocations.push_back(~0u);
}
label.location = getCodeSize();
@ -1019,7 +1032,7 @@ void AssemblyBuilderX64::placeLabel(Label& label)
if (label.id == 0)
{
label.id = nextLabel++;
labelLocations.push_back(0);
labelLocations.push_back(~0u);
}
pendingLabels.push_back({label.id, getCodeSize()});

CodeGen/src/CodeGen.cpp (new file, 449 lines)

@ -0,0 +1,449 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/CodeGen.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/Common.h"
#include "Luau/CodeAllocator.h"
#include "Luau/CodeBlockUnwind.h"
#include "Luau/UnwindBuilder.h"
#include "Luau/UnwindBuilderDwarf2.h"
#include "Luau/UnwindBuilderWin.h"
#include "CustomExecUtils.h"
#include "CodeGenX64.h"
#include "EmitCommonX64.h"
#include "EmitInstructionX64.h"
#include "NativeState.h"
#include "lapi.h"
#include <memory>
#if defined(__x86_64__) || defined(_M_X64)
#ifdef _MSC_VER
#include <intrin.h> // __cpuid
#else
#include <cpuid.h> // __cpuid
#endif
#endif
namespace Luau
{
namespace CodeGen
{
static NativeProto* assembleFunction(AssemblyBuilderX64& build, NativeState& data, Proto* proto)
{
NativeProto* result = new NativeProto();
result->proto = proto;
if (build.logText)
{
if (proto->debugname)
build.logAppend("; function %s()", getstr(proto->debugname));
else
build.logAppend("; function()");
if (proto->linedefined >= 0)
build.logAppend(" line %d\n", proto->linedefined);
else
build.logAppend("\n");
}
std::vector<Label> instLabels;
instLabels.resize(proto->sizecode);
Label start = build.setLabel();
for (int i = 0; i < proto->sizecode;)
{
const Instruction* pc = &proto->code[i];
LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));
build.setLabel(instLabels[i]);
if (build.logText)
build.logAppend("; #%d: %s\n", i, data.names[op]);
switch (op)
{
case LOP_NOP:
break;
case LOP_LOADNIL:
emitInstLoadNil(build, data, pc);
break;
case LOP_LOADB:
emitInstLoadB(build, data, pc, i, instLabels.data());
break;
case LOP_LOADN:
emitInstLoadN(build, data, pc);
break;
case LOP_LOADK:
emitInstLoadK(build, data, pc, proto->k);
break;
case LOP_MOVE:
emitInstMove(build, data, pc);
break;
case LOP_GETTABLE:
emitInstGetTable(build, pc, i);
break;
case LOP_SETTABLE:
emitInstSetTable(build, pc, i);
break;
case LOP_GETTABLEN:
emitInstGetTableN(build, pc, i);
break;
case LOP_SETTABLEN:
emitInstSetTableN(build, pc, i);
break;
case LOP_JUMP:
emitInstJump(build, data, pc, i, instLabels.data());
break;
case LOP_JUMPBACK:
emitInstJumpBack(build, data, pc, i, instLabels.data());
break;
case LOP_JUMPIF:
emitInstJumpIf(build, data, pc, i, instLabels.data(), /* not_ */ false);
break;
case LOP_JUMPIFNOT:
emitInstJumpIf(build, data, pc, i, instLabels.data(), /* not_ */ true);
break;
case LOP_JUMPIFEQ:
emitInstJumpIfEq(build, data, pc, i, instLabels.data(), /* not_ */ false);
break;
case LOP_JUMPIFLE:
emitInstJumpIfCond(build, data, pc, i, instLabels.data(), Condition::LessEqual);
break;
case LOP_JUMPIFLT:
emitInstJumpIfCond(build, data, pc, i, instLabels.data(), Condition::Less);
break;
case LOP_JUMPIFNOTEQ:
emitInstJumpIfEq(build, data, pc, i, instLabels.data(), /* not_ */ true);
break;
case LOP_JUMPIFNOTLE:
emitInstJumpIfCond(build, data, pc, i, instLabels.data(), Condition::NotLessEqual);
break;
case LOP_JUMPIFNOTLT:
emitInstJumpIfCond(build, data, pc, i, instLabels.data(), Condition::NotLess);
break;
case LOP_JUMPX:
emitInstJumpX(build, data, pc, i, instLabels.data());
break;
case LOP_JUMPXEQKNIL:
emitInstJumpxEqNil(build, data, pc, proto->k, i, instLabels.data());
break;
case LOP_JUMPXEQKB:
emitInstJumpxEqB(build, data, pc, proto->k, i, instLabels.data());
break;
case LOP_JUMPXEQKN:
emitInstJumpxEqN(build, data, pc, proto->k, i, instLabels.data());
break;
case LOP_JUMPXEQKS:
emitInstJumpxEqS(build, data, pc, proto->k, i, instLabels.data());
break;
case LOP_ADD:
emitInstAdd(build, pc, i);
break;
case LOP_SUB:
emitInstSub(build, pc, i);
break;
case LOP_MUL:
emitInstMul(build, pc, i);
break;
case LOP_DIV:
emitInstDiv(build, pc, i);
break;
case LOP_MOD:
emitInstMod(build, pc, i);
break;
case LOP_POW:
emitInstPow(build, pc, i);
break;
case LOP_ADDK:
emitInstAddK(build, pc, proto->k, i);
break;
case LOP_SUBK:
emitInstSubK(build, pc, proto->k, i);
break;
case LOP_MULK:
emitInstMulK(build, pc, proto->k, i);
break;
case LOP_DIVK:
emitInstDivK(build, pc, proto->k, i);
break;
case LOP_MODK:
emitInstModK(build, pc, proto->k, i);
break;
case LOP_POWK:
emitInstPowK(build, pc, proto->k, i);
break;
case LOP_NOT:
emitInstNot(build, pc);
break;
case LOP_MINUS:
emitInstMinus(build, pc, i);
break;
case LOP_LENGTH:
emitInstLength(build, pc, i);
break;
case LOP_GETUPVAL:
emitInstGetUpval(build, pc, i);
break;
case LOP_FASTCALL:
emitInstFastCall(build, pc, i, instLabels.data());
break;
case LOP_FASTCALL1:
emitInstFastCall1(build, pc, i, instLabels.data());
break;
case LOP_FASTCALL2:
emitInstFastCall2(build, pc, i, instLabels.data());
break;
case LOP_FASTCALL2K:
emitInstFastCall2K(build, pc, proto->k, i, instLabels.data());
break;
case LOP_FORNPREP:
emitInstForNPrep(build, pc, i, instLabels.data());
break;
case LOP_FORNLOOP:
emitInstForNLoop(build, pc, i, instLabels.data());
break;
case LOP_AND:
emitInstAnd(build, pc);
break;
case LOP_ANDK:
emitInstAndK(build, pc);
break;
case LOP_OR:
emitInstOr(build, pc);
break;
case LOP_ORK:
emitInstOrK(build, pc);
break;
default:
emitFallback(build, data, op, i);
break;
}
i += getOpLength(op);
LUAU_ASSERT(i <= proto->sizecode);
}
result->instTargets = new uintptr_t[proto->sizecode];
for (int i = 0; i < proto->sizecode; i++)
result->instTargets[i] = instLabels[i].location - start.location;
result->location = start.location;
if (build.logText)
build.logAppend("\n");
return result;
}
static void destroyNativeProto(NativeProto* nativeProto)
{
delete[] nativeProto->instTargets;
delete nativeProto;
}
static void onCloseState(lua_State* L)
{
destroyNativeState(L);
}
static void onDestroyFunction(lua_State* L, Proto* proto)
{
NativeProto* nativeProto = getProtoExecData(proto);
LUAU_ASSERT(nativeProto->proto == proto);
setProtoExecData(proto, nullptr);
destroyNativeProto(nativeProto);
}
static int onEnter(lua_State* L, Proto* proto)
{
if (L->singlestep)
return 1;
NativeState* data = getNativeState(L);
if (!L->ci->savedpc)
L->ci->savedpc = proto->code;
// We will jump into native code through a gateway
bool (*gate)(lua_State*, Proto*, uintptr_t, NativeContext*) = (bool (*)(lua_State*, Proto*, uintptr_t, NativeContext*))data->context.gateEntry;
NativeProto* nativeProto = getProtoExecData(proto);
uintptr_t target = nativeProto->instTargets[L->ci->savedpc - proto->code];
// Returns 1 to finish the function in the VM
return gate(L, proto, target, &data->context);
}
static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
{
if (!getProtoExecData(proto))
return;
LUAU_ASSERT(!"native breakpoints are not implemented");
}
bool isSupported()
{
#if !LUA_CUSTOM_EXECUTION
return false;
#elif defined(__x86_64__) || defined(_M_X64)
if (LUA_EXTRA_SIZE != 1)
return false;
if (sizeof(TValue) != 16)
return false;
if (sizeof(LuaNode) != 32)
return false;
int cpuinfo[4] = {};
#ifdef _MSC_VER
__cpuid(cpuinfo, 1);
#else
__cpuid(1, cpuinfo[0], cpuinfo[1], cpuinfo[2], cpuinfo[3]);
#endif
// We require AVX1 support for VEX encoded XMM operations
// We also require SSE4.1 support for ROUNDSD but the AVX check below covers it
// https://en.wikipedia.org/wiki/CPUID#EAX=1:_Processor_Info_and_Feature_Bits
if ((cpuinfo[2] & (1 << 28)) == 0)
return false;
return true;
#else
return false;
#endif
}
void create(lua_State* L)
{
LUAU_ASSERT(isSupported());
NativeState& data = *createNativeState(L);
#if defined(_WIN32)
data.unwindBuilder = std::make_unique<UnwindBuilderWin>();
#else
data.unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
#endif
data.codeAllocator.context = data.unwindBuilder.get();
data.codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
data.codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;
initFallbackTable(data);
initHelperFunctions(data);
if (!x64::initEntryFunction(data))
{
destroyNativeState(L);
return;
}
lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
ecb->close = onCloseState;
ecb->destroy = onDestroyFunction;
ecb->enter = onEnter;
ecb->setbreakpoint = onSetBreakpoint;
}
static void gatherFunctions(std::vector<Proto*>& results, Proto* proto)
{
if (results.size() <= size_t(proto->bytecodeid))
results.resize(proto->bytecodeid + 1);
// Skip protos that we've already compiled in this run: this happens because at -O2, inlined functions get their protos reused
if (results[proto->bytecodeid])
return;
results[proto->bytecodeid] = proto;
for (int i = 0; i < proto->sizep; i++)
gatherFunctions(results, proto->p[i]);
}
void compile(lua_State* L, int idx)
{
LUAU_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
// If initialization has failed, do not compile any functions
if (!getNativeState(L))
return;
AssemblyBuilderX64 build(/* logText= */ false);
NativeState* data = getNativeState(L);
std::vector<Proto*> protos;
gatherFunctions(protos, clvalue(func)->l.p);
std::vector<NativeProto*> results;
results.reserve(protos.size());
// Skip protos that have been compiled during previous invocations of CodeGen::compile
for (Proto* p : protos)
if (p && getProtoExecData(p) == nullptr)
results.push_back(assembleFunction(build, *data, p));
build.finalize();
uint8_t* nativeData = nullptr;
size_t sizeNativeData = 0;
uint8_t* codeStart = nullptr;
if (!data->codeAllocator.allocate(
build.data.data(), int(build.data.size()), build.code.data(), int(build.code.size()), nativeData, sizeNativeData, codeStart))
{
for (NativeProto* result : results)
destroyNativeProto(result);
return;
}
// Relocate instruction offsets
for (NativeProto* result : results)
{
for (int i = 0; i < result->proto->sizecode; i++)
result->instTargets[i] += uintptr_t(codeStart + result->location);
}
// Link native proto objects to Proto; the memory is now managed by VM and will be freed via onDestroyFunction
for (NativeProto* result : results)
setProtoExecData(result->proto, result);
}
std::string getAssemblyText(lua_State* L, int idx)
{
LUAU_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
AssemblyBuilderX64 build(/* logText= */ true);
NativeState data;
initFallbackTable(data);
initInstructionNames(data);
std::vector<Proto*> protos;
gatherFunctions(protos, clvalue(func)->l.p);
for (Proto* p : protos)
if (p)
{
NativeProto* nativeProto = assembleFunction(build, data, p);
destroyNativeProto(nativeProto);
}
build.finalize();
return build.text;
}
} // namespace CodeGen
} // namespace Luau

CodeGen/src/CodeGenX64.cpp (new file, 154 lines)

@ -0,0 +1,154 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/UnwindBuilder.h"
#include "CustomExecUtils.h"
#include "NativeState.h"
#include "EmitCommonX64.h"
#include "lstate.h"
/* An overview of native environment stack setup that we are making in the entry function:
* Each line is 8 bytes, stack grows downwards.
*
* | ... previous frames ...
* | rdx home space | (saved only on windows)
* | rcx home space | (saved only on windows)
* | return address |
* | ... saved non-volatile registers ...
* | unused | for 16 byte alignment of the stack
* | sCode |
* | sClosure | <-- rbp points here
* | argument 6 |
* | argument 5 |
* | r9 home space |
* | r8 home space |
* | rdx home space |
* | rcx home space | <-- rsp points here
*
* Arguments to our entry function are saved to home space only on Windows.
* Space for arguments to the function we call is always reserved, but it is used only on Windows.
*
* Right now we use a frame pointer, but because of a fixed layout we can omit it in the future
*/
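A quick tally of that layout (an illustrative check, not part of the change):

    System V: 8 (return address) + 6 * 8 (saved registers) + 72 (stacksize + localssize) = 128 bytes
    Windows:  8 (return address) + 8 * 8 (saved registers) + 72 (stacksize + localssize) = 144 bytes

Both totals are multiples of 16, so rsp stays 16-byte aligned at the call sites emitted in the generated code.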
namespace Luau
{
namespace CodeGen
{
namespace x64
{
bool initEntryFunction(NativeState& data)
{
AssemblyBuilderX64 build(/* logText= */ false);
UnwindBuilder& unwind = *data.unwindBuilder.get();
unwind.start();
if (getCurrentX64ABI() == X64ABI::Windows)
{
// Place arguments in home space
build.mov(qword[rsp + 16], rArg2);
unwind.spill(16, rArg2);
build.mov(qword[rsp + 8], rArg1);
unwind.spill(8, rArg1);
// Save non-volatile registers that are specific to Windows x64 ABI
build.push(rdi);
unwind.save(rdi);
build.push(rsi);
unwind.save(rsi);
// Once we start using non-volatile SIMD registers, we will save those here
}
// Save common non-volatile registers
build.push(rbx);
unwind.save(rbx);
build.push(rbp);
unwind.save(rbp);
build.push(r12);
unwind.save(r12);
build.push(r13);
unwind.save(r13);
build.push(r14);
unwind.save(r14);
build.push(r15);
unwind.save(r15);
int stacksize = 32 + 16; // 4 home locations for registers, 16 bytes for additional function call arguments
int localssize = 24; // 3 local pointers that also correctly align the stack
// Allocate stack space (reg home area + local data)
build.sub(rsp, stacksize + localssize);
unwind.allocStack(stacksize + localssize);
// Setup frame pointer
build.lea(rbp, qword[rsp + stacksize]);
unwind.setupFrameReg(rbp, stacksize);
unwind.finish();
size_t prologueSize = build.setLabel().location;
// Setup native execution environment
build.mov(rState, rArg1);
build.mov(rNativeContext, rArg4);
build.mov(rBase, qword[rState + offsetof(lua_State, base)]); // L->base
build.mov(rax, qword[rState + offsetof(lua_State, ci)]); // L->ci
build.mov(rax, qword[rax + offsetof(CallInfo, func)]); // L->ci->func
build.mov(rax, qword[rax + offsetof(TValue, value.gc)]); // L->ci->func->value.gc aka cl
build.mov(sClosure, rax);
build.mov(rConstants, qword[rArg2 + offsetof(Proto, k)]); // proto->k
build.mov(rax, qword[rArg2 + offsetof(Proto, code)]); // proto->code
build.mov(sCode, rax);
// Jump to the specified instruction; further control flow will be handled with custom ABI with register setup from EmitCommonX64.h
build.jmp(rArg3);
// Even though we jumped away, we will return here in the end
Label returnOff = build.setLabel();
// Cleanup and exit
build.lea(rsp, qword[rbp + localssize]);
build.pop(r15);
build.pop(r14);
build.pop(r13);
build.pop(r12);
build.pop(rbp);
build.pop(rbx);
if (getCurrentX64ABI() == X64ABI::Windows)
{
build.pop(rsi);
build.pop(rdi);
}
build.ret();
build.finalize();
LUAU_ASSERT(build.data.empty());
if (!data.codeAllocator.allocate(build.data.data(), int(build.data.size()), build.code.data(), int(build.code.size()), data.gateData,
data.gateDataSize, data.context.gateEntry))
{
LUAU_ASSERT(!"failed to create entry function");
return false;
}
// Set the offset at the beginning so that functions in new blocks will not overlay the locations
// specified by the unwind information of the entry function
unwind.setBeginOffset(prologueSize);
data.context.gateExit = data.context.gateEntry + returnOff.location;
return true;
}
} // namespace x64
} // namespace CodeGen
} // namespace Luau

CodeGen/src/CodeGenX64.h (new file, 18 lines)

@ -0,0 +1,18 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
struct NativeState;
namespace x64
{
bool initEntryFunction(NativeState& data);
} // namespace x64
} // namespace CodeGen
} // namespace Luau


@ -0,0 +1,145 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "NativeState.h"
#include "lobject.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
// Here we define helper functions to wrap interaction with Luau custom execution API so that it works with or without LUA_CUSTOM_EXECUTION
#if LUA_CUSTOM_EXECUTION
inline lua_ExecutionCallbacks* getExecutionCallbacks(lua_State* L)
{
return &L->global->ecb;
}
inline NativeState* getNativeState(lua_State* L)
{
lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
return (NativeState*)ecb->context;
}
inline void setNativeState(lua_State* L, NativeState* nativeState)
{
lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
ecb->context = nativeState;
}
inline NativeState* createNativeState(lua_State* L)
{
NativeState* state = new NativeState();
setNativeState(L, state);
return state;
}
inline void destroyNativeState(lua_State* L)
{
NativeState* state = getNativeState(L);
setNativeState(L, nullptr);
delete state;
}
inline NativeProto* getProtoExecData(Proto* proto)
{
return (NativeProto*)proto->execdata;
}
inline void setProtoExecData(Proto* proto, NativeProto* nativeProto)
{
if (nativeProto)
LUAU_ASSERT(proto->execdata == nullptr);
proto->execdata = nativeProto;
}
#define offsetofProtoExecData offsetof(Proto, execdata)
#else
inline lua_ExecutionCallbacks* getExecutionCallbacks(lua_State* L)
{
return nullptr;
}
inline NativeState* getNativeState(lua_State* L)
{
return nullptr;
}
inline void setNativeState(lua_State* L, NativeState* nativeState) {}
inline NativeState* createNativeState(lua_State* L)
{
return nullptr;
}
inline void destroyNativeState(lua_State* L) {}
inline NativeProto* getProtoExecData(Proto* proto)
{
return nullptr;
}
inline void setProtoExecData(Proto* proto, NativeProto* nativeProto) {}
#define offsetofProtoExecData 0
#endif
inline int getOpLength(LuauOpcode op)
{
switch (op)
{
case LOP_GETGLOBAL:
case LOP_SETGLOBAL:
case LOP_GETIMPORT:
case LOP_GETTABLEKS:
case LOP_SETTABLEKS:
case LOP_NAMECALL:
case LOP_JUMPIFEQ:
case LOP_JUMPIFLE:
case LOP_JUMPIFLT:
case LOP_JUMPIFNOTEQ:
case LOP_JUMPIFNOTLE:
case LOP_JUMPIFNOTLT:
case LOP_NEWTABLE:
case LOP_SETLIST:
case LOP_FORGLOOP:
case LOP_LOADKX:
case LOP_FASTCALL2:
case LOP_FASTCALL2K:
case LOP_JUMPXEQKNIL:
case LOP_JUMPXEQKB:
case LOP_JUMPXEQKN:
case LOP_JUMPXEQKS:
return 2;
default:
return 1;
}
}
enum class X64ABI
{
Windows,
SystemV,
};
inline X64ABI getCurrentX64ABI()
{
#if defined(_WIN32)
return X64ABI::Windows;
#else
return X64ABI::SystemV;
#endif
}
} // namespace CodeGen
} // namespace Luau


@ -0,0 +1,109 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitBuiltinsX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/Bytecode.h"
#include "EmitCommonX64.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
BuiltinImplResult emitBuiltinAssert(AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults, Label& fallback)
{
if (nparams < 1 || nresults != 0)
return {BuiltinImplType::None, -1};
if (build.logText)
build.logAppend("; inlined LBF_ASSERT\n");
Label skip;
jumpIfFalsy(build, arg, fallback, skip);
// TODO: use of 'skip' causes a jump to a jump instruction that skips the fallback - can be optimized
build.setLabel(skip);
return {BuiltinImplType::UsesFallback, 0};
}
BuiltinImplResult emitBuiltinMathFloor(AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults, Label& fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
if (build.logText)
build.logAppend("; inlined LBF_MATH_FLOOR\n");
jumpIfTagIsNot(build, arg, LUA_TNUMBER, fallback);
build.vroundsd(xmm0, xmm0, luauRegValue(arg), RoundingModeX64::RoundToNegativeInfinity);
build.vmovsd(luauRegValue(ra), xmm0);
if (ra != arg)
build.mov(luauRegTag(ra), LUA_TNUMBER);
return {BuiltinImplType::UsesFallback, 1};
}
BuiltinImplResult emitBuiltinMathCeil(AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults, Label& fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
if (build.logText)
build.logAppend("; inlined LBF_MATH_CEIL\n");
jumpIfTagIsNot(build, arg, LUA_TNUMBER, fallback);
build.vroundsd(xmm0, xmm0, luauRegValue(arg), RoundingModeX64::RoundToPositiveInfinity);
build.vmovsd(luauRegValue(ra), xmm0);
if (ra != arg)
build.mov(luauRegTag(ra), LUA_TNUMBER);
return {BuiltinImplType::UsesFallback, 1};
}
BuiltinImplResult emitBuiltinMathSqrt(AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults, Label& fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
if (build.logText)
build.logAppend("; inlined LBF_MATH_SQRT\n");
jumpIfTagIsNot(build, arg, LUA_TNUMBER, fallback);
build.vsqrtsd(xmm0, xmm0, luauRegValue(arg));
build.vmovsd(luauRegValue(ra), xmm0);
if (ra != arg)
build.mov(luauRegTag(ra), LUA_TNUMBER);
return {BuiltinImplType::UsesFallback, 1};
}
BuiltinImplResult emitBuiltin(AssemblyBuilderX64& build, int bfid, int nparams, int ra, int arg, OperandX64 args, int nresults, Label& fallback)
{
switch (bfid)
{
case LBF_ASSERT:
return emitBuiltinAssert(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_FLOOR:
return emitBuiltinMathFloor(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_CEIL:
return emitBuiltinMathCeil(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_SQRT:
return emitBuiltinMathSqrt(build, nparams, ra, arg, args, nresults, fallback);
default:
return {BuiltinImplType::None, -1};
}
}
} // namespace CodeGen
} // namespace Luau


@ -0,0 +1,28 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
class AssemblyBuilderX64;
struct Label;
struct OperandX64;
enum class BuiltinImplType
{
None,
UsesFallback, // Uses fallback for unsupported cases
};
struct BuiltinImplResult
{
BuiltinImplType type;
int actualResultCount;
};
BuiltinImplResult emitBuiltin(AssemblyBuilderX64& build, int bfid, int nparams, int ra, int arg, OperandX64 args, int nresults, Label& fallback);
} // namespace CodeGen
} // namespace Luau


@ -0,0 +1,345 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitCommonX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "CustomExecUtils.h"
#include "NativeState.h"
#include "lgc.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, Condition cond, Label& label)
{
// Refresher on comi/ucomi EFLAGS:
// CF only: less
// ZF only: equal
// PF+CF+ZF: unordered (NaN)
if (rhs.cat == CategoryX64::reg)
{
build.vucomisd(rhs, lhs);
}
else
{
build.vmovsd(tmp, rhs);
build.vucomisd(tmp, lhs);
}
// Keep in mind that 'Not' conditions want 'true' for comparisons with NaN
// And because of NaN, integer check interchangeability like 'not less or equal' <-> 'greater' does not hold
switch (cond)
{
case Condition::NotLessEqual:
// (b < a) is the same as !(a <= b). jnae checks CF=1 which means < or NaN
build.jcc(Condition::NotAboveEqual, label);
break;
case Condition::LessEqual:
// (b >= a) is the same as (a <= b). jae checks CF=0 which means >= and not NaN
build.jcc(Condition::AboveEqual, label);
break;
case Condition::NotLess:
// (b <= a) is the same as !(a < b). jna checks CF=1 or ZF=1 which means <= or NaN
build.jcc(Condition::NotAbove, label);
break;
case Condition::Less:
// (b > a) is the same as (a < b). ja checks CF=0 and ZF=0 which means > and not NaN
build.jcc(Condition::Above, label);
break;
case Condition::NotEqual:
// ZF=0 or PF=1 means != or NaN
build.jcc(Condition::NotZero, label);
build.jcc(Condition::Parity, label);
break;
default:
LUAU_ASSERT(!"Unsupported condition");
}
}
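To make the mapping concrete (a worked instance of the comments above): for Condition::Less the code emits vucomisd(rhs, lhs) followed by ja. With lhs = 1.0 and rhs = 2.0 the compare sets CF=0 and ZF=0, so ja is taken, matching lhs < rhs; with a NaN operand the result is unordered (CF=ZF=PF=1), ja is not taken, and the comparison correctly reads as false.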
void jumpOnAnyCmpFallback(AssemblyBuilderX64& build, int ra, int rb, Condition cond, Label& label, int pcpos)
{
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(rb));
if (cond == Condition::NotLessEqual || cond == Condition::LessEqual)
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessequal)]);
else if (cond == Condition::NotLess || cond == Condition::Less)
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessthan)]);
else if (cond == Condition::NotEqual || cond == Condition::Equal)
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_equalval)]);
else
LUAU_ASSERT(!"Unsupported condition");
emitUpdateBase(build);
build.test(eax, eax);
build.jcc(
cond == Condition::NotLessEqual || cond == Condition::NotLess || cond == Condition::NotEqual ? Condition::Zero : Condition::NotZero, label);
}
void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 numd, RegisterX64 numi, int ri, Label& label)
{
LUAU_ASSERT(numi.size == SizeX64::dword);
build.vmovsd(numd, luauRegValue(ri));
// Convert to integer, NaN is converted into 0x80000000
build.vcvttsd2si(numi, numd);
// Convert that integer back to double
build.vcvtsi2sd(tmp, numd, numi);
build.vucomisd(tmp, numd); // Sets ZF=1 if equal or NaN
// We don't need non-integer values
// But to skip the PF=1 check, we proceed with NaN because 0x80000000 index is out of bounds
build.jcc(Condition::NotZero, label);
}
void callArithHelper(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c, int pcpos, TMS tm)
{
emitSetSavedPc(build, pcpos + 1);
if (getCurrentX64ABI() == X64ABI::Windows)
build.mov(sArg5, tm);
else
build.mov(rArg5, tm);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(rb));
build.lea(rArg4, c);
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_doarith)]);
emitUpdateBase(build);
}
void callLengthHelper(AssemblyBuilderX64& build, int ra, int rb, int pcpos)
{
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(rb));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_dolen)]);
emitUpdateBase(build);
}
void callPrepareForN(AssemblyBuilderX64& build, int limit, int step, int init, int pcpos)
{
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(limit));
build.lea(rArg3, luauRegValue(step));
build.lea(rArg4, luauRegValue(init));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_prepareFORN)]);
}
void callGetTable(AssemblyBuilderX64& build, int rb, OperandX64 c, int ra, int pcpos)
{
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(rb));
build.lea(rArg3, c);
build.lea(rArg4, luauRegValue(ra));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_gettable)]);
emitUpdateBase(build);
}
void callSetTable(AssemblyBuilderX64& build, int rb, OperandX64 c, int ra, int pcpos)
{
emitSetSavedPc(build, pcpos + 1);
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(rb));
build.lea(rArg3, c);
build.lea(rArg4, luauRegValue(ra));
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_settable)]);
emitUpdateBase(build);
}
void callBarrierTable(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 table, int ra, Label& skip)
{
LUAU_ASSERT(tmp != table);
// iscollectable(ra)
build.cmp(luauRegTag(ra), LUA_TSTRING);
build.jcc(Condition::Less, skip);
// isblack(obj2gco(h))
build.test(byte[table + offsetof(GCheader, marked)], bitmask(BLACKBIT));
build.jcc(Condition::Zero, skip);
// iswhite(gcvalue(ra))
build.mov(tmp, luauRegValue(ra));
build.test(byte[tmp + offsetof(GCheader, marked)], bit2mask(WHITE0BIT, WHITE1BIT));
build.jcc(Condition::Zero, skip);
LUAU_ASSERT(table != rArg3);
build.mov(rArg3, tmp);
build.mov(rArg2, table);
build.mov(rArg1, rState);
build.call(qword[rNativeContext + offsetof(NativeContext, luaC_barriertable)]);
}
void emitExit(AssemblyBuilderX64& build, bool continueInVm)
{
if (continueInVm)
build.mov(al, 1);
else
build.xor_(eax, eax);
build.jmp(qword[rNativeContext + offsetof(NativeContext, gateExit)]);
}
void emitUpdateBase(AssemblyBuilderX64& build)
{
build.mov(rBase, qword[rState + offsetof(lua_State, base)]);
}
// Note: only uses rax/rdx, the caller may use other registers
void emitSetSavedPc(AssemblyBuilderX64& build, int pcpos)
{
build.mov(rdx, sCode);
build.add(rdx, pcpos * sizeof(Instruction));
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(qword[rax + offsetof(CallInfo, savedpc)], rdx);
}
void emitInterrupt(AssemblyBuilderX64& build, int pcpos)
{
Label skip;
// Skip if there is no interrupt set
build.mov(r8, qword[rState + offsetof(lua_State, global)]);
build.mov(r8, qword[r8 + offsetof(global_State, cb.interrupt)]);
build.test(r8, r8);
build.jcc(Condition::Zero, skip);
emitSetSavedPc(build, pcpos + 1); // uses rax/rdx
// Call interrupt
// TODO: This code should move to the end of the function, or even be outlined so that it can be shared by multiple interruptible instructions
build.mov(rArg1, rState);
build.mov(rArg2d, -1);
build.call(r8);
// Check if we need to exit
build.mov(al, byte[rState + offsetof(lua_State, status)]);
build.test(al, al);
build.jcc(Condition::Zero, skip);
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.sub(qword[rax + offsetof(CallInfo, savedpc)], sizeof(Instruction));
emitExit(build, /* continueInVm */ false);
build.setLabel(skip);
}
void emitFallback(AssemblyBuilderX64& build, NativeState& data, int op, int pcpos)
{
if (op == LOP_CAPTURE)
return;
NativeFallback& opinfo = data.context.fallback[op];
LUAU_ASSERT(opinfo.fallback);
if (build.logText)
build.logAppend("; fallback\n");
// fallback(L, instruction, base, k)
build.mov(rArg1, rState);
build.mov(rArg2, sCode);
build.add(rArg2, pcpos * sizeof(Instruction));
build.mov(rArg3, rBase);
build.mov(rArg4, rConstants);
build.call(qword[rNativeContext + offsetof(NativeContext, fallback) + op * sizeof(NativeFallback) + offsetof(NativeFallback, fallback)]);
// Some instructions may interrupt the execution
if (opinfo.flags & kFallbackCheckInterrupt)
{
Label skip;
build.test(rax, rax);
build.jcc(Condition::NotZero, skip);
emitExit(build, /* continueInVm */ false);
build.setLabel(skip);
}
emitUpdateBase(build);
// Some instructions may jump to a different instruction or a completely different function
if (opinfo.flags & kFallbackUpdatePc)
{
build.mov(rcx, sClosure);
build.mov(rcx, qword[rcx + offsetof(Closure, l.p)]);
// Get instruction index from returned instruction pointer
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 8 back to byte offset later so it cancels out
build.sub(rax, sCode);
build.mov(rdx, qword[rcx + offsetofProtoExecData]);
// Get new instruction location and jump to it
build.mov(rcx, qword[rdx + offsetof(NativeProto, instTargets)]);
build.jmp(qword[rax * 2 + rcx]);
}
else if (opinfo.flags & kFallbackUpdateCi)
{
// Need to update state of the current function before we jump away
build.mov(rcx, qword[rState + offsetof(lua_State, ci)]); // L->ci
build.mov(rcx, qword[rcx + offsetof(CallInfo, func)]); // L->ci->func
build.mov(rcx, qword[rcx + offsetof(TValue, value.gc)]); // L->ci->func->value.gc aka cl
build.mov(sClosure, rcx);
build.mov(rsi, qword[rcx + offsetof(Closure, l.p)]); // cl->l.p aka proto
build.mov(rConstants, qword[rsi + offsetof(Proto, k)]); // proto->k
build.mov(rcx, qword[rsi + offsetof(Proto, code)]); // proto->code
build.mov(sCode, rcx);
// We'll need original instruction pointer later to handle return to interpreter
if (op == LOP_CALL)
build.mov(r9, rax);
// Get instruction index from instruction pointer
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 8 back to byte offset later so it cancels out
build.sub(rax, sCode);
// We need to check if the new function can be executed natively
Label returnToInterpreter;
build.mov(rdx, qword[rsi + offsetofProtoExecData]);
build.test(rdx, rdx);
build.jcc(Condition::Zero, returnToInterpreter);
// Get new instruction location and jump to it
build.mov(rcx, qword[rdx + offsetof(NativeProto, instTargets)]);
build.jmp(qword[rax * 2 + rcx]);
build.setLabel(returnToInterpreter);
// If we are returning to the interpreter to make a call, we need to update the current instruction
if (op == LOP_CALL)
{
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(qword[rax + offsetof(CallInfo, savedpc)], r9);
}
// Continue in the interpreter
emitExit(build, /* continueInVm */ true);
}
}
} // namespace CodeGen
} // namespace Luau

CodeGen/src/EmitCommonX64.h (new file, 175 lines)

@ -0,0 +1,175 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderX64.h"
#include "lobject.h"
#include "ltm.h"
// MS x64 ABI reminder:
// Arguments: rcx, rdx, r8, r9 ('overlapped' with xmm0-xmm3)
// Return: rax, xmm0
// Nonvolatile: r12-r15, rdi, rsi, rbx, rbp
// SIMD: only xmm6-xmm15 are non-volatile, all ymm upper parts are volatile
// AMD64 ABI reminder:
// Arguments: rdi, rsi, rdx, rcx, r8, r9 (xmm0-xmm7)
// Return: rax, rdx, xmm0, xmm1
// Nonvolatile: r12-r15, rbx, rbp
// SIMD: all volatile
namespace Luau
{
namespace CodeGen
{
struct NativeState;
// Data that is very common to access is placed in non-volatile registers
constexpr RegisterX64 rState = r15; // lua_State* L
constexpr RegisterX64 rBase = r14; // StkId base
constexpr RegisterX64 rNativeContext = r13; // NativeContext* context
constexpr RegisterX64 rConstants = r12; // TValue* k
// Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
constexpr OperandX64 sClosure = qword[rbp + 0]; // Closure* cl
constexpr OperandX64 sCode = qword[rbp + 8]; // Instruction* code
#if defined(_WIN32)
constexpr RegisterX64 rArg1 = rcx;
constexpr RegisterX64 rArg2 = rdx;
constexpr RegisterX64 rArg2d = edx;
constexpr RegisterX64 rArg3 = r8;
constexpr RegisterX64 rArg4 = r9;
constexpr RegisterX64 rArg5 = noreg;
constexpr RegisterX64 rArg6 = noreg;
constexpr OperandX64 sArg5 = qword[rsp + 32];
constexpr OperandX64 sArg6 = qword[rsp + 40];
#else
constexpr RegisterX64 rArg1 = rdi;
constexpr RegisterX64 rArg2 = rsi;
constexpr RegisterX64 rArg2d = esi;
constexpr RegisterX64 rArg3 = rdx;
constexpr RegisterX64 rArg4 = rcx;
constexpr RegisterX64 rArg5 = r8;
constexpr RegisterX64 rArg6 = r9;
constexpr OperandX64 sArg5 = noreg;
constexpr OperandX64 sArg6 = noreg;
#endif
constexpr unsigned kTValueSizeLog2 = 4;
inline OperandX64 luauReg(int ri)
{
return xmmword[rBase + ri * sizeof(TValue)];
}
inline OperandX64 luauRegValue(int ri)
{
return qword[rBase + ri * sizeof(TValue) + offsetof(TValue, value)];
}
inline OperandX64 luauRegTag(int ri)
{
return dword[rBase + ri * sizeof(TValue) + offsetof(TValue, tt)];
}
inline OperandX64 luauRegValueBoolean(int ri)
{
return dword[rBase + ri * sizeof(TValue) + offsetof(TValue, value)];
}
inline OperandX64 luauConstant(int ki)
{
return xmmword[rConstants + ki * sizeof(TValue)];
}
inline OperandX64 luauConstantValue(int ki)
{
return qword[rConstants + ki * sizeof(TValue) + offsetof(TValue, value)];
}
inline void setLuauReg(AssemblyBuilderX64& build, RegisterX64 tmp, int ri, OperandX64 op)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
build.vmovups(tmp, op);
build.vmovups(luauReg(ri), tmp);
}
inline void jumpIfTagIs(AssemblyBuilderX64& build, int ri, lua_Type tag, Label& label)
{
build.cmp(luauRegTag(ri), tag);
build.jcc(Condition::Equal, label);
}
inline void jumpIfTagIsNot(AssemblyBuilderX64& build, int ri, lua_Type tag, Label& label)
{
build.cmp(luauRegTag(ri), tag);
build.jcc(Condition::NotEqual, label);
}
// Note: fallthrough label should be placed after this condition
inline void jumpIfFalsy(AssemblyBuilderX64& build, int ri, Label& target, Label& fallthrough)
{
jumpIfTagIs(build, ri, LUA_TNIL, target); // false if nil
jumpIfTagIsNot(build, ri, LUA_TBOOLEAN, fallthrough); // true if not nil or boolean
build.cmp(luauRegValueBoolean(ri), 0);
build.jcc(Condition::Equal, target); // false if boolean value is 'false'
}
// Note: fallthrough label should be placed after this condition
inline void jumpIfTruthy(AssemblyBuilderX64& build, int ri, Label& target, Label& fallthrough)
{
jumpIfTagIs(build, ri, LUA_TNIL, fallthrough); // false if nil
jumpIfTagIsNot(build, ri, LUA_TBOOLEAN, target); // true if not nil or boolean
build.cmp(luauRegValueBoolean(ri), 0);
build.jcc(Condition::NotEqual, target); // true if boolean value is 'true'
}
inline void jumpIfMetatablePresent(AssemblyBuilderX64& build, RegisterX64 table, Label& target)
{
build.cmp(qword[table + offsetof(Table, metatable)], 0);
build.jcc(Condition::NotEqual, target);
}
inline void jumpIfUnsafeEnv(AssemblyBuilderX64& build, RegisterX64 tmp, Label& label)
{
build.mov(tmp, sClosure);
build.mov(tmp, qword[tmp + offsetof(Closure, env)]);
build.test(byte[tmp + offsetof(Table, safeenv)], 1);
build.jcc(Condition::Zero, label); // Not a safe environment
}
inline void jumpIfTableIsReadOnly(AssemblyBuilderX64& build, RegisterX64 table, Label& label)
{
build.cmp(byte[table + offsetof(Table, readonly)], 0);
build.jcc(Condition::NotEqual, label);
}
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, Condition cond, Label& label);
void jumpOnAnyCmpFallback(AssemblyBuilderX64& build, int ra, int rb, Condition cond, Label& label, int pcpos);
void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 numd, RegisterX64 numi, int ri, Label& label);
void callArithHelper(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c, int pcpos, TMS tm);
void callLengthHelper(AssemblyBuilderX64& build, int ra, int rb, int pcpos);
void callPrepareForN(AssemblyBuilderX64& build, int limit, int step, int init, int pcpos);
void callGetTable(AssemblyBuilderX64& build, int rb, OperandX64 c, int ra, int pcpos);
void callSetTable(AssemblyBuilderX64& build, int rb, OperandX64 c, int ra, int pcpos);
void callBarrierTable(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 table, int ra, Label& skip);
void emitExit(AssemblyBuilderX64& build, bool continueInVm);
void emitUpdateBase(AssemblyBuilderX64& build);
void emitSetSavedPc(AssemblyBuilderX64& build, int pcpos); // Note: only uses rax/rdx, the caller may use other registers
void emitInterrupt(AssemblyBuilderX64& build, int pcpos);
void emitFallback(AssemblyBuilderX64& build, NativeState& data, int op, int pcpos);
} // namespace CodeGen
} // namespace Luau


@ -0,0 +1,925 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitInstructionX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "CustomExecUtils.h"
#include "EmitBuiltinsX64.h"
#include "EmitCommonX64.h"
#include "NativeState.h"
#include "lobject.h"
#include "ltm.h"
namespace Luau
{
namespace CodeGen
{
void emitInstLoadNil(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc)
{
int ra = LUAU_INSN_A(*pc);
build.mov(luauRegTag(ra), LUA_TNIL);
}
void emitInstLoadB(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr)
{
int ra = LUAU_INSN_A(*pc);
build.mov(luauRegValue(ra), LUAU_INSN_B(*pc));
build.mov(luauRegTag(ra), LUA_TBOOLEAN);
if (int target = LUAU_INSN_C(*pc))
build.jmp(labelarr[pcpos + target + 1]);
}
void emitInstLoadN(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc)
{
int ra = LUAU_INSN_A(*pc);
build.vmovsd(xmm0, build.f64(double(LUAU_INSN_D(*pc))));
build.vmovsd(luauRegValue(ra), xmm0);
build.mov(luauRegTag(ra), LUA_TNUMBER);
}
void emitInstLoadK(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k)
{
int ra = LUAU_INSN_A(*pc);
build.vmovups(xmm0, luauConstant(LUAU_INSN_D(*pc)));
build.vmovups(luauReg(ra), xmm0);
}
void emitInstMove(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
build.vmovups(xmm0, luauReg(rb));
build.vmovups(luauReg(ra), xmm0);
}
void emitInstJump(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr)
{
build.jmp(labelarr[pcpos + LUAU_INSN_D(*pc) + 1]);
}
void emitInstJumpBack(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr)
{
emitInterrupt(build, pcpos);
build.jmp(labelarr[pcpos + LUAU_INSN_D(*pc) + 1]);
}
void emitInstJumpIf(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr, bool not_)
{
int ra = LUAU_INSN_A(*pc);
Label& target = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
Label& exit = labelarr[pcpos + 1];
if (not_)
jumpIfFalsy(build, ra, target, exit);
else
jumpIfTruthy(build, ra, target, exit);
}
void emitInstJumpIfEq(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr, bool not_)
{
int ra = LUAU_INSN_A(*pc);
int rb = pc[1];
Label& target = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
Label& exit = labelarr[pcpos + 2];
Label any;
build.mov(eax, luauRegTag(ra));
build.cmp(eax, luauRegTag(rb));
build.jcc(Condition::NotEqual, not_ ? target : exit);
// fast-path: number
build.cmp(eax, LUA_TNUMBER);
build.jcc(Condition::NotEqual, any);
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), luauRegValue(rb), Condition::NotEqual, not_ ? target : exit);
build.jmp(not_ ? exit : target);
// slow-path
// TODO: move to the end of the function
build.setLabel(any);
jumpOnAnyCmpFallback(build, ra, rb, not_ ? Condition::NotEqual : Condition::Equal, target, pcpos);
}
void emitInstJumpIfCond(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr, Condition cond)
{
int ra = LUAU_INSN_A(*pc);
int rb = pc[1];
Label& target = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
Label& exit = labelarr[pcpos + 2];
Label any;
// fast-path: number
jumpIfTagIsNot(build, ra, LUA_TNUMBER, any);
jumpIfTagIsNot(build, rb, LUA_TNUMBER, any);
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), luauRegValue(rb), cond, target);
build.jmp(exit);
// slow-path
// TODO: move to the end of the function
build.setLabel(any);
jumpOnAnyCmpFallback(build, ra, rb, cond, target, pcpos);
}
void emitInstJumpX(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr)
{
emitInterrupt(build, pcpos);
build.jmp(labelarr[pcpos + LUAU_INSN_E(*pc) + 1]);
}
void emitInstJumpxEqNil(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr)
{
int ra = LUAU_INSN_A(*pc);
bool not_ = (pc[1] & 0x80000000) != 0;
Label& target = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
build.cmp(luauRegTag(ra), LUA_TNIL);
build.jcc(not_ ? Condition::NotEqual : Condition::Equal, target);
}
void emitInstJumpxEqB(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr)
{
int ra = LUAU_INSN_A(*pc);
uint32_t aux = pc[1];
bool not_ = (aux & 0x80000000) != 0;
Label& target = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
Label& exit = labelarr[pcpos + 2];
jumpIfTagIsNot(build, ra, LUA_TBOOLEAN, not_ ? target : exit);
build.test(luauRegValueBoolean(ra), 1);
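// aux & 0x1 is the boolean constant K being compared against; xoring it with 'not_' picks the jump
// polarity, so we branch to 'target' when boolean(ra) == K in the plain form and when it differs in the NOT form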
build.jcc((aux & 0x1) ^ not_ ? Condition::NotZero : Condition::Zero, target);
}
void emitInstJumpxEqN(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr)
{
int ra = LUAU_INSN_A(*pc);
uint32_t aux = pc[1];
bool not_ = (aux & 0x80000000) != 0;
TValue kv = k[aux & 0xffffff];
Label& target = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
Label& exit = labelarr[pcpos + 2];
jumpIfTagIsNot(build, ra, LUA_TNUMBER, not_ ? target : exit);
if (not_)
{
jumpOnNumberCmp(build, xmm0, luauRegValue(ra), build.f64(kv.value.n), Condition::NotEqual, target);
}
else
{
// Compact equality check requires two labels, so it's not supported in generic 'jumpOnNumberCmp'
build.vmovsd(xmm0, luauRegValue(ra));
build.vucomisd(xmm0, build.f64(kv.value.n));
build.jcc(Condition::Parity, exit); // We first have to check PF=1 for NaN operands, because it also sets ZF=1
build.jcc(Condition::Zero, target); // Now that NaN is out of the way, we can check ZF=1 for equality
}
}
void emitInstJumpxEqS(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr)
{
int ra = LUAU_INSN_A(*pc);
uint32_t aux = pc[1];
bool not_ = (aux & 0x80000000) != 0;
Label& target = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
Label& exit = labelarr[pcpos + 2];
jumpIfTagIsNot(build, ra, LUA_TSTRING, not_ ? target : exit);
build.mov(rax, luauRegValue(ra));
build.cmp(rax, luauConstantValue(aux & 0xffffff));
build.jcc(not_ ? Condition::NotEqual : Condition::Equal, target);
}
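// Shared emitter for the binary arithmetic opcodes. 'opc' is the right-hand operand (a register value or
// a constant); 'rc' is its register index, or -1 for the *K forms where the second tag check is unnecessary.
// Numbers take the inline fast path below, everything else falls back to callArithHelper.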
static void emitInstBinaryNumeric(AssemblyBuilderX64& build, int ra, int rb, int rc, OperandX64 opc, int pcpos, TMS tm)
{
Label common, exit;
jumpIfTagIsNot(build, rb, LUA_TNUMBER, common);
if (rc != -1 && rc != rb)
jumpIfTagIsNot(build, rc, LUA_TNUMBER, common);
// fast-path: number
build.vmovsd(xmm0, luauRegValue(rb));
switch (tm)
{
case TM_ADD:
build.vaddsd(xmm0, xmm0, opc);
break;
case TM_SUB:
build.vsubsd(xmm0, xmm0, opc);
break;
case TM_MUL:
build.vmulsd(xmm0, xmm0, opc);
break;
case TM_DIV:
build.vdivsd(xmm0, xmm0, opc);
break;
case TM_MOD:
// This follows the implementation of 'luai_nummod' which is less precise than 'fmod' for better performance
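// i.e. result = lhs - floor(lhs / rhs) * rhs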
build.vmovsd(xmm1, opc);
build.vdivsd(xmm2, xmm0, xmm1);
build.vroundsd(xmm2, xmm2, xmm2, RoundingModeX64::RoundToNegativeInfinity);
build.vmulsd(xmm1, xmm2, xmm1);
build.vsubsd(xmm0, xmm0, xmm1);
break;
case TM_POW:
build.vmovsd(xmm1, luauRegValue(rc));
build.call(qword[rNativeContext + offsetof(NativeContext, libm_pow)]);
break;
default:
LUAU_ASSERT(!"unsupported binary op");
}
build.vmovsd(luauRegValue(ra), xmm0);
if (ra != rb && ra != rc)
build.mov(luauRegTag(ra), LUA_TNUMBER);
build.jmp(exit);
// slow-path
// TODO: move to the end of the function
build.setLabel(common);
callArithHelper(build, ra, rb, opc, pcpos, tm);
build.setLabel(exit);
}
void emitInstAdd(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), LUAU_INSN_C(*pc), luauRegValue(LUAU_INSN_C(*pc)), pcpos, TM_ADD);
}
void emitInstSub(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), LUAU_INSN_C(*pc), luauRegValue(LUAU_INSN_C(*pc)), pcpos, TM_SUB);
}
void emitInstMul(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), LUAU_INSN_C(*pc), luauRegValue(LUAU_INSN_C(*pc)), pcpos, TM_MUL);
}
void emitInstDiv(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), LUAU_INSN_C(*pc), luauRegValue(LUAU_INSN_C(*pc)), pcpos, TM_DIV);
}
void emitInstMod(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), LUAU_INSN_C(*pc), luauRegValue(LUAU_INSN_C(*pc)), pcpos, TM_MOD);
}
void emitInstPow(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), LUAU_INSN_C(*pc), luauRegValue(LUAU_INSN_C(*pc)), pcpos, TM_POW);
}
void emitInstAddK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), -1, luauConstantValue(LUAU_INSN_C(*pc)), pcpos, TM_ADD);
}
void emitInstSubK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), -1, luauConstantValue(LUAU_INSN_C(*pc)), pcpos, TM_SUB);
}
void emitInstMulK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), -1, luauConstantValue(LUAU_INSN_C(*pc)), pcpos, TM_MUL);
}
void emitInstDivK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), -1, luauConstantValue(LUAU_INSN_C(*pc)), pcpos, TM_DIV);
}
void emitInstModK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos)
{
emitInstBinaryNumeric(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), -1, luauConstantValue(LUAU_INSN_C(*pc)), pcpos, TM_MOD);
}
void emitInstPowK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
double kv = nvalue(&k[LUAU_INSN_C(*pc)]);
Label common, exit;
jumpIfTagIsNot(build, rb, LUA_TNUMBER, common);
// fast-path: number
build.vmovsd(xmm0, luauRegValue(rb));
// Specialize for a few constants, similar to how it's done in the VM
if (kv == 2.0)
{
build.vmulsd(xmm0, xmm0, xmm0);
}
else if (kv == 0.5)
{
build.vsqrtsd(xmm0, xmm0, xmm0);
}
else if (kv == 3.0)
{
build.vmulsd(xmm1, xmm0, xmm0);
build.vmulsd(xmm0, xmm0, xmm1);
}
else
{
build.vmovsd(xmm1, build.f64(kv));
build.call(qword[rNativeContext + offsetof(NativeContext, libm_pow)]);
}
build.vmovsd(luauRegValue(ra), xmm0);
if (ra != rb)
build.mov(luauRegTag(ra), LUA_TNUMBER);
build.jmp(exit);
// slow-path
// TODO: move to the end of the function
build.setLabel(common);
callArithHelper(build, ra, rb, luauConstantValue(LUAU_INSN_C(*pc)), pcpos, TM_POW);
build.setLabel(exit);
}
void emitInstNot(AssemblyBuilderX64& build, const Instruction* pc)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
Label saveone, savezero, exit;
jumpIfFalsy(build, rb, saveone, savezero);
build.setLabel(savezero);
build.mov(luauRegValueBoolean(ra), 0);
build.jmp(exit);
build.setLabel(saveone);
build.mov(luauRegValueBoolean(ra), 1);
build.setLabel(exit);
build.mov(luauRegTag(ra), LUA_TBOOLEAN);
}
void emitInstMinus(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
Label any, exit;
jumpIfTagIsNot(build, rb, LUA_TNUMBER, any);
// fast-path: number
build.vxorpd(xmm0, xmm0, xmm0);
build.vsubsd(xmm0, xmm0, luauRegValue(rb));
build.vmovsd(luauRegValue(ra), xmm0);
if (ra != rb)
build.mov(luauRegTag(ra), LUA_TNUMBER);
build.jmp(exit);
// slow-path
// TODO: move to the end of the function
build.setLabel(any);
callArithHelper(build, ra, rb, luauRegValue(rb), pcpos, TM_UNM);
build.setLabel(exit);
}
void emitInstLength(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
Label any, exit;
jumpIfTagIsNot(build, rb, LUA_TTABLE, any);
// fast-path: table without __len
build.mov(rArg1, luauRegValue(rb));
jumpIfMetatablePresent(build, rArg1, any);
// First argument (Table*) is already in rArg1
build.call(qword[rNativeContext + offsetof(NativeContext, luaH_getn)]);
build.vcvtsi2sd(xmm0, xmm0, eax);
build.vmovsd(luauRegValue(ra), xmm0);
build.mov(luauRegTag(ra), LUA_TNUMBER);
build.jmp(exit);
// slow-path
// TODO: move to the end of the function
build.setLabel(any);
callLengthHelper(build, ra, rb, pcpos);
build.setLabel(exit);
}
void emitInstGetUpval(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int up = LUAU_INSN_B(*pc);
build.mov(rax, sClosure);
build.add(rax, offsetof(Closure, l.uprefs) + sizeof(TValue) * up);
// uprefs[] entries are either actual values or point to an UpVal object that holds a pointer to the value
Label skip;
// TODO: jumpIfTagIsNot can be generalized to take OperandX64, which would let us use it here; let's wait until this pattern comes up more often
build.cmp(dword[rax + offsetof(TValue, tt)], LUA_TUPVAL);
build.jcc(Condition::NotEqual, skip);
// UpVal.v points to the value (either on stack, or on heap inside each UpVal, but we can deref it unconditionally)
build.mov(rax, qword[rax + offsetof(TValue, value.gc)]);
build.mov(rax, qword[rax + offsetof(UpVal, v)]);
build.setLabel(skip);
build.vmovups(xmm0, xmmword[rax]);
build.vmovups(luauReg(ra), xmm0);
}
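// Common emitter for the FASTCALL family. When 'customParams' is set (FASTCALL1/2/2K) the argument count
// and the extra-argument operand are supplied explicitly; otherwise (plain FASTCALL) they are recovered
// from the paired LOP_CALL at pc[skip + 1]. Builtins that emitBuiltin can't fully inline are invoked
// through the luauF_* fast function table, leaving the LOP_CALL fallback to handle failures.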
static void emitInstFastCallN(
AssemblyBuilderX64& build, const Instruction* pc, bool customParams, int customParamCount, OperandX64 customArgs, int pcpos, Label* labelarr)
{
int bfid = LUAU_INSN_A(*pc);
int skip = LUAU_INSN_C(*pc);
Instruction call = pc[skip + 1];
LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
int ra = LUAU_INSN_A(call);
int nparams = customParams ? customParamCount : LUAU_INSN_B(call) - 1;
int nresults = LUAU_INSN_C(call) - 1;
int arg = customParams ? LUAU_INSN_B(*pc) : ra + 1;
OperandX64 args = customParams ? customArgs : luauRegValue(ra + 2);
Label exit;
jumpIfUnsafeEnv(build, rax, exit);
BuiltinImplResult br = emitBuiltin(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, exit);
if (br.type == BuiltinImplType::UsesFallback)
{
if (nresults == LUA_MULTRET)
{
// L->top = ra + n;
build.lea(rax, qword[rBase + (ra + br.actualResultCount) * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
else if (nparams == LUA_MULTRET)
{
// L->top = L->ci->top;
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(rax, qword[rax + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
// TODO: once we start outlining the fallback, we will be able to fall through to the next instruction
build.jmp(labelarr[pcpos + skip + 2]);
build.setLabel(exit);
return;
}
// TODO: we can skip saving pc for some well-behaved builtins which we didn't inline
emitSetSavedPc(build, pcpos); // uses rax/rdx
build.mov(rax, qword[rNativeContext + offsetof(NativeContext, luauF_table) + bfid * sizeof(luau_FastFunction)]);
// 5th parameter (args) is left unset for LOP_FASTCALL1
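// The Windows x64 ABI passes only four arguments in registers, so the 5th/6th arguments go through
// stack slots (sArg5/sArg6); the System V ABI has register slots (rArg5/rArg6) available for them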
if (args.cat == CategoryX64::mem)
{
if (getCurrentX64ABI() == X64ABI::Windows)
{
build.lea(rcx, args);
build.mov(sArg5, rcx);
}
else
{
build.lea(rArg5, args);
}
}
if (nparams == LUA_MULTRET)
{
// TODO: for SystemV ABI we can compute the result directly into rArg6
// L->top - (ra + 1)
build.mov(rcx, qword[rState + offsetof(lua_State, top)]);
build.lea(rdx, qword[rBase + (ra + 1) * sizeof(TValue)]);
build.sub(rcx, rdx);
build.shr(rcx, kTValueSizeLog2);
if (getCurrentX64ABI() == X64ABI::Windows)
build.mov(sArg6, rcx);
else
build.mov(rArg6, rcx);
}
else
{
if (getCurrentX64ABI() == X64ABI::Windows)
build.mov(sArg6, nparams);
else
build.mov(rArg6, nparams);
}
build.mov(rArg1, rState);
build.lea(rArg2, luauRegValue(ra));
build.lea(rArg3, luauRegValue(arg));
build.mov(rArg4, nresults);
build.call(rax);
build.test(eax, eax); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(Condition::Less, exit); // jl jumps if SF != OF
if (nresults == LUA_MULTRET)
{
// L->top = ra + n;
build.shl(rax, kTValueSizeLog2);
build.lea(rax, qword[rBase + rax + ra * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
else if (nparams == LUA_MULTRET)
{
// L->top = L->ci->top;
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(rax, qword[rax + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
build.jmp(labelarr[pcpos + skip + 2]);
build.setLabel(exit);
// TODO: fallback to LOP_CALL after a fast call should be outlined
}
void emitInstFastCall1(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
{
emitInstFastCallN(build, pc, /* customParams */ true, /* customParamCount */ 1, /* customArgs */ 0, pcpos, labelarr);
}
void emitInstFastCall2(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
{
emitInstFastCallN(build, pc, /* customParams */ true, /* customParamCount */ 2, /* customArgs */ luauRegValue(pc[1]), pcpos, labelarr);
}
void emitInstFastCall2K(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr)
{
emitInstFastCallN(build, pc, /* customParams */ true, /* customParamCount */ 2, /* customArgs */ luauConstantValue(pc[1]), pcpos, labelarr);
}
void emitInstFastCall(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
{
emitInstFastCallN(build, pc, /* customParams */ false, /* customParamCount */ 0, /* customArgs */ 0, pcpos, labelarr);
}
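// Numeric for-loop prologue: as loaded below, ra+0 holds the limit, ra+1 the step and ra+2 the index.
// Operands that aren't numbers are converted via callPrepareForN first; if the initial loop condition
// already fails we jump straight to 'loopExit', skipping the loop body entirely.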
void emitInstForNPrep(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
{
int ra = LUAU_INSN_A(*pc);
Label& loopExit = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
Label tryConvert, exit;
jumpIfTagIsNot(build, ra + 0, LUA_TNUMBER, tryConvert);
jumpIfTagIsNot(build, ra + 1, LUA_TNUMBER, tryConvert);
jumpIfTagIsNot(build, ra + 2, LUA_TNUMBER, tryConvert);
// After successful conversion of arguments to number, we return here
Label retry = build.setLabel();
RegisterX64 limit = xmm0;
RegisterX64 step = xmm1;
RegisterX64 idx = xmm2;
RegisterX64 zero = xmm3;
build.vxorpd(zero, xmm0, xmm0);
build.vmovsd(limit, luauRegValue(ra + 0));
build.vmovsd(step, luauRegValue(ra + 1));
build.vmovsd(idx, luauRegValue(ra + 2));
Label reverse;
// step <= 0
jumpOnNumberCmp(build, noreg, step, zero, Condition::LessEqual, reverse);
// TODO: target branches can probably be arranged better, but we need tests for NaN behavior preservation
// false: idx <= limit
jumpOnNumberCmp(build, noreg, idx, limit, Condition::LessEqual, exit);
build.jmp(loopExit);
// true: limit <= idx
build.setLabel(reverse);
jumpOnNumberCmp(build, noreg, limit, idx, Condition::LessEqual, exit);
build.jmp(loopExit);
// TODO: place at the end of the function
build.setLabel(tryConvert);
callPrepareForN(build, ra + 0, ra + 1, ra + 2, pcpos);
build.jmp(retry);
build.setLabel(exit);
}
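// Numeric for-loop back edge: add the step to the index, store it back, and jump to 'loopRepeat' while
// the loop condition still holds; otherwise fall through and leave the loop.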
void emitInstForNLoop(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr)
{
emitInterrupt(build, pcpos);
int ra = LUAU_INSN_A(*pc);
Label& loopRepeat = labelarr[pcpos + LUAU_INSN_D(*pc) + 1];
RegisterX64 limit = xmm0;
RegisterX64 step = xmm1;
RegisterX64 idx = xmm2;
RegisterX64 zero = xmm3;
build.vxorpd(zero, xmm0, xmm0);
build.vmovsd(limit, luauRegValue(ra + 0));
build.vmovsd(step, luauRegValue(ra + 1));
build.vmovsd(idx, luauRegValue(ra + 2));
build.vaddsd(idx, idx, step);
build.vmovsd(luauRegValue(ra + 2), idx);
Label reverse, exit;
// step <= 0
jumpOnNumberCmp(build, noreg, step, zero, Condition::LessEqual, reverse);
// false: idx <= limit
jumpOnNumberCmp(build, noreg, idx, limit, Condition::LessEqual, loopRepeat);
build.jmp(exit);
// true: limit <= idx
build.setLabel(reverse);
jumpOnNumberCmp(build, noreg, limit, idx, Condition::LessEqual, loopRepeat);
build.setLabel(exit);
}
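// AND keeps Lua's value-preserving semantics: if rb is falsy the result is rb itself, otherwise it is
// the right-hand operand 'c'. emitInstOrX below mirrors this with jumpIfTruthy.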
static void emitInstAndX(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c)
{
Label target, fallthrough;
jumpIfFalsy(build, rb, target, fallthrough);
build.setLabel(fallthrough);
build.vmovups(xmm0, c);
build.vmovups(luauReg(ra), xmm0);
if (ra == rb)
{
build.setLabel(target);
}
else
{
Label exit;
build.jmp(exit);
build.setLabel(target);
build.vmovups(xmm0, luauReg(rb));
build.vmovups(luauReg(ra), xmm0);
build.setLabel(exit);
}
}
void emitInstAnd(AssemblyBuilderX64& build, const Instruction* pc)
{
emitInstAndX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauReg(LUAU_INSN_C(*pc)));
}
void emitInstAndK(AssemblyBuilderX64& build, const Instruction* pc)
{
emitInstAndX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauConstant(LUAU_INSN_C(*pc)));
}
static void emitInstOrX(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c)
{
Label target, fallthrough;
jumpIfTruthy(build, rb, target, fallthrough);
build.setLabel(fallthrough);
build.vmovups(xmm0, c);
build.vmovups(luauReg(ra), xmm0);
if (ra == rb)
{
build.setLabel(target);
}
else
{
Label exit;
build.jmp(exit);
build.setLabel(target);
build.vmovups(xmm0, luauReg(rb));
build.vmovups(luauReg(ra), xmm0);
build.setLabel(exit);
}
}
void emitInstOr(AssemblyBuilderX64& build, const Instruction* pc)
{
emitInstOrX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauReg(LUAU_INSN_C(*pc)));
}
void emitInstOrK(AssemblyBuilderX64& build, const Instruction* pc)
{
emitInstOrX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauConstant(LUAU_INSN_C(*pc)));
}
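// GETTABLEN/SETTABLEN use a constant array index: the bytecode stores the 0-based value 'c', which is
// bounds-checked against sizearray on the fast path, while the fallback receives c + 1 as the 1-based
// Lua index. Tables with metatables (and read-only tables for stores) always take the fallback.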
void emitInstGetTableN(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
int c = LUAU_INSN_C(*pc);
Label fallback, exit;
jumpIfTagIsNot(build, rb, LUA_TTABLE, fallback);
RegisterX64 table = rcx;
build.mov(table, luauRegValue(rb));
// unsigned(c) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], c);
build.jcc(Condition::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
build.mov(rax, qword[table + offsetof(Table, array)]);
setLuauReg(build, xmm0, ra, xmmword[rax + c * sizeof(TValue)]);
build.jmp(exit);
// slow-path
// TODO: move to the end of the function
build.setLabel(fallback);
TValue n;
setnvalue(&n, c + 1);
callGetTable(build, rb, build.bytes(&n, sizeof(n)), ra, pcpos);
build.setLabel(exit);
}
void emitInstSetTableN(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
int c = LUAU_INSN_C(*pc);
Label fallback, exit;
jumpIfTagIsNot(build, rb, LUA_TTABLE, fallback);
RegisterX64 table = rcx;
build.mov(table, luauRegValue(rb));
// unsigned(c) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], c);
build.jcc(Condition::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
jumpIfTableIsReadOnly(build, table, fallback);
// setobj2t(L, &h->array[c], ra);
build.mov(rax, qword[table + offsetof(Table, array)]);
build.vmovups(xmm0, luauReg(ra));
build.vmovups(xmmword[rax + c * sizeof(TValue)], xmm0);
callBarrierTable(build, rax, table, ra, exit);
build.jmp(exit);
// slow-path
// TODO: move to the end of the function
build.setLabel(fallback);
TValue n;
setnvalue(&n, c + 1);
callSetTable(build, rb, build.bytes(&n, sizeof(n)), ra, pcpos);
build.setLabel(exit);
}
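// GETTABLE/SETTABLE take the index from register rc: the number is converted to an integer array index
// (bailing out to the fallback when that isn't possible), adjusted to 0-based, and then bounds-checked
// the same way as the constant-index variants above.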
void emitInstGetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
int rc = LUAU_INSN_C(*pc);
Label fallback, exit;
jumpIfTagIsNot(build, rb, LUA_TTABLE, fallback);
jumpIfTagIsNot(build, rc, LUA_TNUMBER, fallback);
// fast-path: table with a number index
RegisterX64 table = rcx;
build.mov(table, luauRegValue(rb));
convertNumberToIndexOrJump(build, xmm1, xmm0, eax, rc, fallback);
// index - 1
build.dec(eax);
// unsigned(index - 1) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], eax);
build.jcc(Condition::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
// setobj2s(L, ra, &h->array[unsigned(index - 1)]);
build.mov(rdx, qword[table + offsetof(Table, array)]);
build.shl(eax, kTValueSizeLog2);
setLuauReg(build, xmm0, ra, xmmword[rdx + rax]);
build.jmp(exit);
build.setLabel(fallback);
// slow-path
// TODO: move to the end of the function
callGetTable(build, rb, luauRegValue(rc), ra, pcpos);
build.setLabel(exit);
}
void emitInstSetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpos)
{
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);
int rc = LUAU_INSN_C(*pc);
Label fallback, exit;
jumpIfTagIsNot(build, rb, LUA_TTABLE, fallback);
jumpIfTagIsNot(build, rc, LUA_TNUMBER, fallback);
// fast-path: table with a number index
RegisterX64 table = rcx;
build.mov(table, luauRegValue(rb));
convertNumberToIndexOrJump(build, xmm1, xmm0, eax, rc, fallback);
// index - 1
build.dec(eax);
// unsigned(index - 1) < unsigned(h->sizearray)
build.cmp(dword[table + offsetof(Table, sizearray)], eax);
build.jcc(Condition::BelowEqual, fallback);
jumpIfMetatablePresent(build, table, fallback);
jumpIfTableIsReadOnly(build, table, fallback);
// setobj2t(L, &h->array[unsigned(index - 1)], ra);
build.mov(rdx, qword[table + offsetof(Table, array)]);
build.shl(eax, kTValueSizeLog2);
build.vmovups(xmm0, luauReg(ra));
build.vmovups(xmmword[rdx + rax], xmm0);
callBarrierTable(build, rdx, table, ra, exit);
build.jmp(exit);
build.setLabel(fallback);
// slow-path
// TODO: move to the end of the function
callSetTable(build, rb, luauRegValue(rc), ra, pcpos);
build.setLabel(exit);
}
} // namespace CodeGen
} // namespace Luau


@ -0,0 +1,66 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stdint.h>
typedef uint32_t Instruction;
typedef struct lua_TValue TValue;
namespace Luau
{
namespace CodeGen
{
class AssemblyBuilderX64;
enum class Condition;
struct Label;
struct NativeState;
void emitInstLoadNil(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc);
void emitInstLoadB(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstLoadN(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc);
void emitInstLoadK(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k);
void emitInstMove(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc);
void emitInstJump(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJumpBack(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJumpIf(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr, bool not_);
void emitInstJumpIfEq(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr, bool not_);
void emitInstJumpIfCond(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr, Condition cond);
void emitInstJumpX(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstJumpxEqNil(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr);
void emitInstJumpxEqB(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr);
void emitInstJumpxEqN(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr);
void emitInstJumpxEqS(AssemblyBuilderX64& build, NativeState& data, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr);
void emitInstAdd(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstSub(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstMul(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstDiv(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstMod(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstPow(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstAddK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos);
void emitInstSubK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos);
void emitInstMulK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos);
void emitInstDivK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos);
void emitInstModK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos);
void emitInstPowK(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos);
void emitInstNot(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstMinus(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstLength(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstGetUpval(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstFastCall1(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstFastCall2(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstFastCall2K(AssemblyBuilderX64& build, const Instruction* pc, const TValue* k, int pcpos, Label* labelarr);
void emitInstFastCall(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstForNPrep(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstForNLoop(AssemblyBuilderX64& build, const Instruction* pc, int pcpos, Label* labelarr);
void emitInstAnd(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstAndK(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstOr(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstOrK(AssemblyBuilderX64& build, const Instruction* pc);
void emitInstGetTableN(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstSetTableN(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstGetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
void emitInstSetTable(AssemblyBuilderX64& build, const Instruction* pc, int pcpos);
} // namespace CodeGen
} // namespace Luau


@ -4,15 +4,17 @@
#include "Fallbacks.h"
#include "FallbacksProlog.h"
const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
LUAU_ASSERT(insn == 0);
return pc;
}
const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -20,8 +22,9 @@ const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, Clos
return pc;
}
const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -32,8 +35,9 @@ const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, Closur
return pc;
}
const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -41,8 +45,9 @@ const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, Closur
return pc;
}
const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
TValue* kv = VM_KV(LUAU_INSN_D(insn));
@ -51,8 +56,9 @@ const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, Closur
return pc;
}
const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -61,8 +67,9 @@ const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, Closure
return pc;
}
const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
uint32_t aux = *pc++;
@ -92,8 +99,9 @@ const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, Cl
}
}
const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
uint32_t aux = *pc++;
@ -124,8 +132,9 @@ const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, Cl
}
}
const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
TValue* ur = VM_UV(LUAU_INSN_B(insn));
@ -135,8 +144,9 @@ const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
TValue* ur = VM_UV(LUAU_INSN_B(insn));
@ -147,8 +157,9 @@ const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -157,8 +168,9 @@ const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc,
return pc;
}
const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
TValue* kv = VM_KV(LUAU_INSN_D(insn));
@ -183,8 +195,9 @@ const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, Cl
}
}
const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -300,8 +313,9 @@ const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, C
return pc;
}
const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -376,8 +390,9 @@ const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, C
}
}
const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -406,8 +421,9 @@ const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -437,8 +453,9 @@ const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -465,8 +482,9 @@ const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, Cl
return pc;
}
const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -494,8 +512,9 @@ const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, Cl
return pc;
}
const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -527,6 +546,7 @@ const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, C
default:
LUAU_ASSERT(!"Unknown upvalue capture type");
LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
}
}
@ -534,8 +554,9 @@ const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, C
return pc;
}
const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -641,8 +662,9 @@ const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
VM_INTERRUPT();
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -734,8 +756,9 @@ const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, Closure
}
}
const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
VM_INTERRUPT();
Instruction insn = *pc++;
StkId ra = &base[LUAU_INSN_A(insn)]; // note: this can point to L->top if b == LUA_MULTRET making VM_REG unsafe to use
@ -785,8 +808,9 @@ const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, Closu
return pc;
}
const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
pc += LUAU_INSN_D(insn);
@ -794,8 +818,9 @@ const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, Closure
return pc;
}
const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -804,8 +829,9 @@ const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, Closu
return pc;
}
const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -814,8 +840,9 @@ const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, Cl
return pc;
}
const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -906,7 +933,9 @@ const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, Clo
// slow path after switch()
break;
default:;
default:
LUAU_ASSERT(!"Unknown value type");
LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
}
// slow-path: tables with metatables and userdata values
@ -926,8 +955,9 @@ const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, Clo
}
}
const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1018,7 +1048,9 @@ const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc,
// slow path after switch()
break;
default:;
default:
LUAU_ASSERT(!"Unknown value type");
LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
}
// slow-path: tables with metatables and userdata values
@ -1038,8 +1070,9 @@ const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc,
}
}
const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1071,8 +1104,9 @@ const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, Clo
}
}
const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1104,8 +1138,9 @@ const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc,
}
}
const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1137,8 +1172,9 @@ const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, Clo
}
}
const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1170,8 +1206,9 @@ const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc,
}
}
const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1216,8 +1253,9 @@ const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, Closure*
}
}
const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1262,8 +1300,9 @@ const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, Closure*
}
}
const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1323,8 +1362,9 @@ const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, Closure*
}
}
const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1384,8 +1424,9 @@ const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, Closure*
}
}
const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1407,8 +1448,9 @@ const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, Closure*
}
}
const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1428,8 +1470,9 @@ const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, Closure*
}
}
const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1449,8 +1492,9 @@ const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, Closure
}
}
const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1470,8 +1514,9 @@ const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, Closure
}
}
const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1516,8 +1561,9 @@ const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, Closure
}
}
const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1562,8 +1608,9 @@ const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, Closure
}
}
const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1585,8 +1632,9 @@ const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, Closure
}
}
const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1612,8 +1660,9 @@ const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, Closure
}
}
const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1623,8 +1672,9 @@ const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, Closure*
return pc;
}
const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1634,8 +1684,9 @@ const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, Closure*
return pc;
}
const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1645,8 +1696,9 @@ const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, Closure
return pc;
}
const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1656,8 +1708,9 @@ const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, Closure*
return pc;
}
const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int b = LUAU_INSN_B(insn);
int c = LUAU_INSN_C(insn);
@ -1672,8 +1725,9 @@ const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, Closu
return pc;
}
const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1683,8 +1737,9 @@ const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, Closure*
return pc;
}
const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1726,8 +1781,9 @@ const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, Closur
}
}
const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
@ -1764,8 +1820,9 @@ const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, Closu
}
}
const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
int b = LUAU_INSN_B(insn);
@ -1776,8 +1833,9 @@ const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
TValue* kv = VM_KV(LUAU_INSN_D(insn));
@ -1787,8 +1845,9 @@ const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = &base[LUAU_INSN_B(insn)]; // note: this can point to L->top if c == LUA_MULTRET making VM_REG unsafe to use
@ -1819,8 +1878,9 @@ const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, Clos
return pc;
}
const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1843,8 +1903,9 @@ const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
VM_INTERRUPT();
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1870,8 +1931,9 @@ const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, Clo
}
}
const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -1926,8 +1988,9 @@ const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
VM_INTERRUPT();
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -2026,8 +2089,9 @@ const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, Clo
}
}
const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -2048,14 +2112,16 @@ const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* p
return pc;
}
const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
LUAU_ASSERT(!"Unsupported deprecated opcode");
LUAU_UNREACHABLE();
}
const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -2076,14 +2142,16 @@ const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc
return pc;
}
const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
LUAU_ASSERT(!"Unsupported deprecated opcode");
LUAU_UNREACHABLE();
}
const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int b = LUAU_INSN_B(insn) - 1;
int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;
@ -2111,8 +2179,9 @@ const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, C
}
}
const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
TValue* kv = VM_KV(LUAU_INSN_D(insn));
@ -2166,8 +2235,9 @@ const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, C
return pc;
}
const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int numparams = LUAU_INSN_A(insn);
@ -2196,8 +2266,9 @@ const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc,
return pc;
}
const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
VM_INTERRUPT();
Instruction insn = *pc++;
@ -2206,8 +2277,9 @@ const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
uint32_t aux = *pc++;
@ -2217,8 +2289,9 @@ const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, Closu
return pc;
}
const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
VM_INTERRUPT();
Instruction insn = *pc++;
@ -2227,8 +2300,9 @@ const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, Closur
return pc;
}
const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int bfid = LUAU_INSN_A(insn);
int skip = LUAU_INSN_C(insn);
@ -2273,8 +2347,9 @@ const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, Clo
}
}
const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int hits = LUAU_INSN_E(insn);
@ -2285,26 +2360,30 @@ const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, Clo
return pc;
}
const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
LUAU_ASSERT(!"CAPTURE is a pseudo-opcode and must be executed as part of NEWCLOSURE");
LUAU_UNREACHABLE();
}
const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
LUAU_ASSERT(!"Unsupported deprecated opcode");
LUAU_UNREACHABLE();
}
const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
LUAU_ASSERT(!"Unsupported deprecated opcode");
LUAU_UNREACHABLE();
}
const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int bfid = LUAU_INSN_A(insn);
TValue* arg = VM_REG(LUAU_INSN_B(insn));
@ -2349,8 +2428,9 @@ const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, Cl
}
}
const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int bfid = LUAU_INSN_A(insn);
int skip = LUAU_INSN_C(insn) - 1;
@ -2397,8 +2477,9 @@ const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, Cl
}
}
const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int bfid = LUAU_INSN_A(insn);
int skip = LUAU_INSN_C(insn) - 1;
@ -2445,14 +2526,15 @@ const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, C
}
}
const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
LUAU_ASSERT(!"Unsupported deprecated opcode");
LUAU_UNREACHABLE();
}
const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -2464,8 +2546,9 @@ const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc,
return pc;
}
const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -2475,8 +2558,9 @@ const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, Cl
return pc;
}
const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));
@ -2497,8 +2581,9 @@ const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, Cl
return pc;
}
const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
uint32_t aux = *pc;
StkId ra = VM_REG(LUAU_INSN_A(insn));

View File

@ -10,84 +10,84 @@ typedef uint32_t Instruction;
typedef struct lua_TValue TValue;
typedef TValue* StkId;
const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
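The declarations above drop the Closure* parameter from every fallback; each handler now re-derives the closure from the current call frame, as the implementations earlier in this diff show. A minimal sketch of the new shape (execute_LOP_EXAMPLE is a placeholder name, not a real opcode handler):
const Instruction* execute_LOP_EXAMPLE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    // the closure is no longer passed in; it is recovered from the active call frame
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    // ... opcode-specific work against ra, base and k ...
    return pc;
}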

122
CodeGen/src/NativeState.cpp Normal file
View File

@ -0,0 +1,122 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "NativeState.h"
#include "Luau/UnwindBuilder.h"
#include "CustomExecUtils.h"
#include "Fallbacks.h"
#include "lbuiltins.h"
#include "lgc.h"
#include "ltable.h"
#include "lvm.h"
#include <math.h>
#define CODEGEN_SET_FALLBACK(op, flags) data.context.fallback[op] = {execute_##op, flags}
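// e.g. CODEGEN_SET_FALLBACK(LOP_GETGLOBAL, 0) expands to: data.context.fallback[LOP_GETGLOBAL] = {execute_LOP_GETGLOBAL, 0}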
#define CODEGEN_SET_NAME(op) data.names[op] = #op
// Similar to a dispatch table in lvmexecute.cpp
#define CODEGEN_SET_NAMES() \
CODEGEN_SET_NAME(LOP_NOP), CODEGEN_SET_NAME(LOP_BREAK), CODEGEN_SET_NAME(LOP_LOADNIL), CODEGEN_SET_NAME(LOP_LOADB), CODEGEN_SET_NAME(LOP_LOADN), \
CODEGEN_SET_NAME(LOP_LOADK), CODEGEN_SET_NAME(LOP_MOVE), CODEGEN_SET_NAME(LOP_GETGLOBAL), CODEGEN_SET_NAME(LOP_SETGLOBAL), \
CODEGEN_SET_NAME(LOP_GETUPVAL), CODEGEN_SET_NAME(LOP_SETUPVAL), CODEGEN_SET_NAME(LOP_CLOSEUPVALS), CODEGEN_SET_NAME(LOP_GETIMPORT), \
CODEGEN_SET_NAME(LOP_GETTABLE), CODEGEN_SET_NAME(LOP_SETTABLE), CODEGEN_SET_NAME(LOP_GETTABLEKS), CODEGEN_SET_NAME(LOP_SETTABLEKS), \
CODEGEN_SET_NAME(LOP_GETTABLEN), CODEGEN_SET_NAME(LOP_SETTABLEN), CODEGEN_SET_NAME(LOP_NEWCLOSURE), CODEGEN_SET_NAME(LOP_NAMECALL), \
CODEGEN_SET_NAME(LOP_CALL), CODEGEN_SET_NAME(LOP_RETURN), CODEGEN_SET_NAME(LOP_JUMP), CODEGEN_SET_NAME(LOP_JUMPBACK), \
CODEGEN_SET_NAME(LOP_JUMPIF), CODEGEN_SET_NAME(LOP_JUMPIFNOT), CODEGEN_SET_NAME(LOP_JUMPIFEQ), CODEGEN_SET_NAME(LOP_JUMPIFLE), \
CODEGEN_SET_NAME(LOP_JUMPIFLT), CODEGEN_SET_NAME(LOP_JUMPIFNOTEQ), CODEGEN_SET_NAME(LOP_JUMPIFNOTLE), CODEGEN_SET_NAME(LOP_JUMPIFNOTLT), \
CODEGEN_SET_NAME(LOP_ADD), CODEGEN_SET_NAME(LOP_SUB), CODEGEN_SET_NAME(LOP_MUL), CODEGEN_SET_NAME(LOP_DIV), CODEGEN_SET_NAME(LOP_MOD), \
CODEGEN_SET_NAME(LOP_POW), CODEGEN_SET_NAME(LOP_ADDK), CODEGEN_SET_NAME(LOP_SUBK), CODEGEN_SET_NAME(LOP_MULK), CODEGEN_SET_NAME(LOP_DIVK), \
CODEGEN_SET_NAME(LOP_MODK), CODEGEN_SET_NAME(LOP_POWK), CODEGEN_SET_NAME(LOP_AND), CODEGEN_SET_NAME(LOP_OR), CODEGEN_SET_NAME(LOP_ANDK), \
CODEGEN_SET_NAME(LOP_ORK), CODEGEN_SET_NAME(LOP_CONCAT), CODEGEN_SET_NAME(LOP_NOT), CODEGEN_SET_NAME(LOP_MINUS), \
CODEGEN_SET_NAME(LOP_LENGTH), CODEGEN_SET_NAME(LOP_NEWTABLE), CODEGEN_SET_NAME(LOP_DUPTABLE), CODEGEN_SET_NAME(LOP_SETLIST), \
CODEGEN_SET_NAME(LOP_FORNPREP), CODEGEN_SET_NAME(LOP_FORNLOOP), CODEGEN_SET_NAME(LOP_FORGLOOP), CODEGEN_SET_NAME(LOP_FORGPREP_INEXT), \
CODEGEN_SET_NAME(LOP_DEP_FORGLOOP_INEXT), CODEGEN_SET_NAME(LOP_FORGPREP_NEXT), CODEGEN_SET_NAME(LOP_DEP_FORGLOOP_NEXT), \
CODEGEN_SET_NAME(LOP_GETVARARGS), CODEGEN_SET_NAME(LOP_DUPCLOSURE), CODEGEN_SET_NAME(LOP_PREPVARARGS), CODEGEN_SET_NAME(LOP_LOADKX), \
CODEGEN_SET_NAME(LOP_JUMPX), CODEGEN_SET_NAME(LOP_FASTCALL), CODEGEN_SET_NAME(LOP_COVERAGE), CODEGEN_SET_NAME(LOP_CAPTURE), \
CODEGEN_SET_NAME(LOP_DEP_JUMPIFEQK), CODEGEN_SET_NAME(LOP_DEP_JUMPIFNOTEQK), CODEGEN_SET_NAME(LOP_FASTCALL1), \
CODEGEN_SET_NAME(LOP_FASTCALL2), CODEGEN_SET_NAME(LOP_FASTCALL2K), CODEGEN_SET_NAME(LOP_FORGPREP), CODEGEN_SET_NAME(LOP_JUMPXEQKNIL), \
CODEGEN_SET_NAME(LOP_JUMPXEQKB), CODEGEN_SET_NAME(LOP_JUMPXEQKN), CODEGEN_SET_NAME(LOP_JUMPXEQKS)
static int luauF_missing(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
return -1;
}
namespace Luau
{
namespace CodeGen
{
constexpr unsigned kBlockSize = 4 * 1024 * 1024;
constexpr unsigned kMaxTotalSize = 256 * 1024 * 1024;
NativeState::NativeState()
: codeAllocator(kBlockSize, kMaxTotalSize)
{
}
NativeState::~NativeState() = default;
void initFallbackTable(NativeState& data)
{
CODEGEN_SET_FALLBACK(LOP_GETGLOBAL, 0);
CODEGEN_SET_FALLBACK(LOP_SETGLOBAL, 0);
CODEGEN_SET_FALLBACK(LOP_SETUPVAL, 0);
CODEGEN_SET_FALLBACK(LOP_CLOSEUPVALS, 0);
CODEGEN_SET_FALLBACK(LOP_GETIMPORT, 0);
CODEGEN_SET_FALLBACK(LOP_GETTABLEKS, 0);
CODEGEN_SET_FALLBACK(LOP_SETTABLEKS, 0);
CODEGEN_SET_FALLBACK(LOP_NEWCLOSURE, kFallbackUpdatePc);
CODEGEN_SET_FALLBACK(LOP_NAMECALL, 0);
CODEGEN_SET_FALLBACK(LOP_CALL, kFallbackUpdateCi | kFallbackCheckInterrupt);
CODEGEN_SET_FALLBACK(LOP_RETURN, kFallbackUpdateCi | kFallbackCheckInterrupt);
CODEGEN_SET_FALLBACK(LOP_CONCAT, 0);
CODEGEN_SET_FALLBACK(LOP_NEWTABLE, 0);
CODEGEN_SET_FALLBACK(LOP_DUPTABLE, 0);
CODEGEN_SET_FALLBACK(LOP_SETLIST, kFallbackCheckInterrupt);
CODEGEN_SET_FALLBACK(LOP_FORGPREP, kFallbackUpdatePc);
CODEGEN_SET_FALLBACK(LOP_FORGLOOP, kFallbackUpdatePc | kFallbackCheckInterrupt);
CODEGEN_SET_FALLBACK(LOP_FORGPREP_INEXT, kFallbackUpdatePc);
CODEGEN_SET_FALLBACK(LOP_FORGPREP_NEXT, kFallbackUpdatePc);
CODEGEN_SET_FALLBACK(LOP_GETVARARGS, 0);
CODEGEN_SET_FALLBACK(LOP_DUPCLOSURE, 0);
CODEGEN_SET_FALLBACK(LOP_PREPVARARGS, 0);
CODEGEN_SET_FALLBACK(LOP_LOADKX, 0);
CODEGEN_SET_FALLBACK(LOP_COVERAGE, 0);
CODEGEN_SET_FALLBACK(LOP_BREAK, 0);
}
void initHelperFunctions(NativeState& data)
{
static_assert(sizeof(data.context.luauF_table) / sizeof(data.context.luauF_table[0]) == sizeof(luauF_table) / sizeof(luauF_table[0]),
"fast call tables are not of the same length");
// Replace missing fast call functions with an empty placeholder that forces LOP_CALL fallback
for (size_t i = 0; i < sizeof(data.context.luauF_table) / sizeof(data.context.luauF_table[0]); i++)
data.context.luauF_table[i] = luauF_table[i] ? luauF_table[i] : luauF_missing;
data.context.luaV_lessthan = luaV_lessthan;
data.context.luaV_lessequal = luaV_lessequal;
data.context.luaV_equalval = luaV_equalval;
data.context.luaV_doarith = luaV_doarith;
data.context.luaV_dolen = luaV_dolen;
data.context.luaV_prepareFORN = luaV_prepareFORN;
data.context.luaV_gettable = luaV_gettable;
data.context.luaV_settable = luaV_settable;
data.context.luaH_getn = luaH_getn;
data.context.luaC_barriertable = luaC_barriertable;
data.context.libm_pow = pow;
}
void initInstructionNames(NativeState& data)
{
CODEGEN_SET_NAMES();
}
} // namespace CodeGen
} // namespace Luau

94
CodeGen/src/NativeState.h Normal file
View File

@ -0,0 +1,94 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
#include "Luau/CodeAllocator.h"
#include <memory>
#include <stdint.h>
#include "lobject.h"
#include "ltm.h"
typedef int (*luau_FastFunction)(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams);
namespace Luau
{
namespace CodeGen
{
class UnwindBuilder;
using FallbackFn = const Instruction*(lua_State* L, const Instruction* pc, StkId base, TValue* k);
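// note: matches the updated execute_LOP_* signatures; each fallback re-derives the Closure from L->ci->func instead of taking it as a parameter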
constexpr uint8_t kFallbackUpdatePc = 1 << 0;
constexpr uint8_t kFallbackUpdateCi = 1 << 1;
constexpr uint8_t kFallbackCheckInterrupt = 1 << 2;
struct NativeFallback
{
FallbackFn* fallback;
uint8_t flags;
};
struct NativeProto
{
uintptr_t* instTargets = nullptr;
Proto* proto = nullptr;
uint32_t location = 0;
};
struct NativeContext
{
// Gateway (C => native transition) entry & exit, compiled at runtime
uint8_t* gateEntry = nullptr;
uint8_t* gateExit = nullptr;
// Opcode fallbacks, implemented in C
NativeFallback fallback[LOP__COUNT] = {};
// Fast call methods, implemented in C
luau_FastFunction luauF_table[256] = {};
// Helper functions, implemented in C
int (*luaV_lessthan)(lua_State* L, const TValue* l, const TValue* r) = nullptr;
int (*luaV_lessequal)(lua_State* L, const TValue* l, const TValue* r) = nullptr;
int (*luaV_equalval)(lua_State* L, const TValue* t1, const TValue* t2) = nullptr;
void (*luaV_doarith)(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TMS op) = nullptr;
void (*luaV_dolen)(lua_State* L, StkId ra, const TValue* rb) = nullptr;
void (*luaV_prepareFORN)(lua_State* L, StkId plimit, StkId pstep, StkId pinit) = nullptr;
void (*luaV_gettable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
void (*luaV_settable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
int (*luaH_getn)(Table* t) = nullptr;
void (*luaC_barriertable)(lua_State* L, Table* t, GCObject* v) = nullptr;
double (*libm_pow)(double, double) = nullptr;
};
struct NativeState
{
NativeState();
~NativeState();
CodeAllocator codeAllocator;
std::unique_ptr<UnwindBuilder> unwindBuilder;
// For annotations in assembly text generation
const char* names[LOP__COUNT] = {};
uint8_t* gateData = nullptr;
size_t gateDataSize = 0;
NativeContext context;
};
void initFallbackTable(NativeState& data);
void initHelperFunctions(NativeState& data);
void initInstructionNames(NativeState& data);
} // namespace CodeGen
} // namespace Luau
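A rough usage sketch for the pieces declared above, assuming the code generator owns a single NativeState; the real wiring lives in CodeGen.cpp, which is not part of this excerpt, so the exact sequence may differ:
// hypothetical initialization of a NativeState instance
Luau::CodeGen::NativeState data;
Luau::CodeGen::initFallbackTable(data);    // C fallbacks for opcodes without a native lowering
Luau::CodeGen::initHelperFunctions(data);  // luaV_*/luaH_*/libm helpers reachable from generated code
Luau::CodeGen::initInstructionNames(data); // names used when annotating generated assembly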

View File

@ -122,3 +122,9 @@ FValue<T>* FValue<T>::list = nullptr;
{ \
Luau::FValue<int> flag(#flag, def, true); \
}
#if defined(__GNUC__)
#define LUAU_PRINTF_ATTR(fmt, arg) __attribute__((format(printf, fmt, arg)))
#else
#define LUAU_PRINTF_ATTR(fmt, arg)
#endif
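LUAU_PRINTF_ATTR lets GCC and Clang type-check printf-style arguments and compiles away to nothing elsewhere. A hedged example of how a variadic helper might be annotated (reportf is a made-up function, not part of this change):
// 'fmt' is parameter 1, the variadic arguments start at parameter 2
LUAU_PRINTF_ATTR(1, 2) void reportf(const char* fmt, ...);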

View File

@ -11,7 +11,6 @@ inline bool isFlagExperimental(const char* flag)
// Flags in this list are disabled by default in various command-line tools. They may have behavior that is not fully final,
// or critical bugs that are found after the code has been submitted.
static const char* kList[] = {
"LuauLowerBoundsCalculation",
"LuauInterpolatedStringBaseSupport",
"LuauInstantiateInSubtyping", // requires some fixes to lua-apps code
// makes sure we always have at least one entry

View File

@ -102,6 +102,11 @@ public:
void setDumpSource(const std::string& source);
bool needsDebugRemarks() const
{
return (dumpFlags & Dump_Remarks) != 0;
}
const std::string& getBytecode() const
{
LUAU_ASSERT(!bytecode.empty()); // did you forget to call finalize?
@ -110,6 +115,7 @@ public:
std::string dumpFunction(uint32_t id) const;
std::string dumpEverything() const;
std::string dumpSourceRemarks() const;
static uint32_t getImportId(int32_t id0);
static uint32_t getImportId(int32_t id0, int32_t id1);
@ -243,6 +249,7 @@ private:
uint32_t dumpFlags = 0;
std::vector<std::string> dumpSource;
std::vector<std::pair<int, std::string>> dumpRemarks;
std::string (BytecodeBuilder::*dumpFunctionPtr)() const = nullptr;

View File

@ -572,6 +572,7 @@ void BytecodeBuilder::addDebugRemark(const char* format, ...)
debugRemarkBuffer += '\0';
debugRemarks.emplace_back(uint32_t(insns.size()), uint32_t(offset));
dumpRemarks.emplace_back(debugLine, debugRemarkBuffer.c_str() + offset);
}
void BytecodeBuilder::finalize()
@ -1949,4 +1950,40 @@ std::string BytecodeBuilder::dumpEverything() const
return result;
}
std::string BytecodeBuilder::dumpSourceRemarks() const
{
std::string result;
size_t nextRemark = 0;
std::vector<std::pair<int, std::string>> remarks = dumpRemarks;
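// sort a copy by source line so the remarks can be merged with the dumped source lines below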
std::sort(remarks.begin(), remarks.end());
for (size_t i = 0; i < dumpSource.size(); ++i)
{
const std::string& line = dumpSource[i];
size_t indent = 0;
while (indent < line.length() && (line[indent] == ' ' || line[indent] == '\t'))
indent++;
while (nextRemark < remarks.size() && remarks[nextRemark].first == int(i + 1))
{
formatAppend(result, "%.*s-- remark: %s\n", int(indent), line.c_str(), remarks[nextRemark].second.c_str());
nextRemark++;
// skip duplicate remarks (due to inlining/unrolling)
while (nextRemark < remarks.size() && remarks[nextRemark] == remarks[nextRemark - 1])
nextRemark++;
}
result += line;
if (i + 1 < dumpSource.size())
result += '\n';
}
return result;
}
} // namespace Luau

View File

@ -709,6 +709,17 @@ struct Compiler
if (const int* id = builtins.find(expr))
bfid = *id;
if (bfid >= 0 && bytecode.needsDebugRemarks())
{
Builtin builtin = getBuiltin(expr->func, globals, variables);
bool lastMult = expr->args.size > 0 && isExprMultRet(expr->args.data[expr->args.size - 1]);
if (builtin.object.value)
bytecode.addDebugRemark("builtin %s.%s/%d%s", builtin.object.value, builtin.method.value, int(expr->args.size), lastMult ? "+" : "");
else if (builtin.method.value)
bytecode.addDebugRemark("builtin %s/%d%s", builtin.method.value, int(expr->args.size), lastMult ? "+" : "");
}
if (bfid == LBF_SELECT_VARARG)
{
// Optimization: compile select(_, ...) as FASTCALL1; the builtin will read variadic arguments directly
@ -918,6 +929,9 @@ struct Compiler
shared = int16_t(cid);
}
if (shared < 0)
bytecode.addDebugRemark("allocation: closure with %d upvalues", int(captures.size()));
if (shared >= 0)
bytecode.emitAD(LOP_DUPCLOSURE, target, shared);
else
@ -1599,6 +1613,8 @@ struct Compiler
{
TableShape shape = tableShapes[expr];
bytecode.addDebugRemark("allocation: table hash %d", shape.hashSize);
bytecode.emitABC(LOP_NEWTABLE, target, encodeHashSize(shape.hashSize), 0);
bytecode.emitAux(shape.arraySize);
return;
@ -1671,6 +1687,8 @@ struct Compiler
if (tid < 0)
CompileError::raise(expr->location, "Exceeded constant limit; simplify the code to compile");
bytecode.addDebugRemark("allocation: table template %d", hashSize);
if (tid < 32768)
{
bytecode.emitAD(LOP_DUPTABLE, reg, int16_t(tid));
@ -1690,8 +1708,17 @@ struct Compiler
bool trailingVarargs = last && last->kind == AstExprTable::Item::List && last->value->is<AstExprVarargs>();
LUAU_ASSERT(!trailingVarargs || arraySize > 0);
unsigned int arrayAllocation = arraySize - trailingVarargs + indexSize;
if (hashSize == 0)
bytecode.addDebugRemark("allocation: table array %d", arrayAllocation);
else if (arrayAllocation == 0)
bytecode.addDebugRemark("allocation: table hash %d", hashSize);
else
bytecode.addDebugRemark("allocation: table hash %d array %d", hashSize, arrayAllocation);
bytecode.emitABC(LOP_NEWTABLE, reg, uint8_t(encodedHashSize), 0);
bytecode.emitAux(arraySize - trailingVarargs + indexSize);
bytecode.emitAux(arrayAllocation);
}
unsigned int arrayChunkSize = std::min(16u, arraySize);

View File

@ -112,6 +112,11 @@ ifeq ($(protobuf),download)
EPROTOC=../build/libprotobuf-mutator/external.protobuf/bin/protoc
endif
ifneq ($(native),)
CXXFLAGS+=-DLUA_CUSTOM_EXECUTION=1
TESTS_ARGS+=--codegen
endif
# target-specific flags
$(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include
$(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include
@ -120,7 +125,7 @@ $(CODEGEN_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -ICodeGen/include -IVM
$(VM_OBJECTS): CXXFLAGS+=-std=c++11 -ICommon/include -IVM/include
$(ISOCLINE_OBJECTS): CXXFLAGS+=-Wno-unused-function -Iextern/isocline/include
$(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -ICodeGen/include -IVM/include -ICLI -Iextern -DDOCTEST_CONFIG_DOUBLE_STRINGIFY
$(REPL_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IVM/include -Iextern -Iextern/isocline/include
$(REPL_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IVM/include -ICodeGen/include -Iextern -Iextern/isocline/include
$(ANALYZE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include -Iextern
$(FUZZ_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -IVM/include
@ -175,7 +180,7 @@ luau-tests: $(TESTS_TARGET)
# executable targets
$(TESTS_TARGET): $(TESTS_OBJECTS) $(ANALYSIS_TARGET) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(REPL_CLI_TARGET): $(REPL_CLI_OBJECTS) $(COMPILER_TARGET) $(AST_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(REPL_CLI_TARGET): $(REPL_CLI_OBJECTS) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(ANALYZE_CLI_TARGET): $(ANALYZE_CLI_OBJECTS) $(ANALYSIS_TARGET) $(AST_TARGET)
$(TESTS_TARGET) $(REPL_CLI_TARGET) $(ANALYZE_CLI_TARGET):

View File

@ -58,6 +58,7 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/include/Luau/AssemblyBuilderX64.h
CodeGen/include/Luau/CodeAllocator.h
CodeGen/include/Luau/CodeBlockUnwind.h
CodeGen/include/Luau/CodeGen.h
CodeGen/include/Luau/Condition.h
CodeGen/include/Luau/Label.h
CodeGen/include/Luau/OperandX64.h
@ -69,13 +70,25 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/AssemblyBuilderX64.cpp
CodeGen/src/CodeAllocator.cpp
CodeGen/src/CodeBlockUnwind.cpp
CodeGen/src/CodeGen.cpp
CodeGen/src/CodeGenX64.cpp
CodeGen/src/EmitBuiltinsX64.cpp
CodeGen/src/EmitCommonX64.cpp
CodeGen/src/EmitInstructionX64.cpp
CodeGen/src/Fallbacks.cpp
CodeGen/src/NativeState.cpp
CodeGen/src/UnwindBuilderDwarf2.cpp
CodeGen/src/UnwindBuilderWin.cpp
CodeGen/src/ByteUtils.h
CodeGen/src/CustomExecUtils.h
CodeGen/src/CodeGenX64.h
CodeGen/src/EmitBuiltinsX64.h
CodeGen/src/EmitCommonX64.h
CodeGen/src/EmitInstructionX64.h
CodeGen/src/Fallbacks.h
CodeGen/src/FallbacksProlog.h
CodeGen/src/NativeState.h
)
# Luau.Analysis Sources
@ -210,6 +223,7 @@ target_sources(Luau.VM PRIVATE
VM/src/lvmexecute.cpp
VM/src/lvmload.cpp
VM/src/lvmutils.cpp
VM/src/lapi.h
VM/src/lbuiltins.h
VM/src/lbytecode.h

View File

@ -38,10 +38,10 @@ enum lua_Status
enum lua_CoStatus
{
LUA_CORUN = 0, // running
LUA_COSUS, // suspended
LUA_CONOR, // 'normal' (it resumed another coroutine)
LUA_COFIN, // finished
LUA_COERR, // finished with error
LUA_COSUS, // suspended
LUA_CONOR, // 'normal' (it resumed another coroutine)
LUA_COFIN, // finished
LUA_COERR, // finished with error
};
typedef struct lua_State lua_State;
@ -398,16 +398,16 @@ LUA_API const char* lua_debugtrace(lua_State* L);
struct lua_Debug
{
const char* name; // (n)
const char* what; // (s) `Lua', `C', `main', `tail'
const char* source; // (s)
const char* short_src; // (s)
int linedefined; // (s)
int currentline; // (l)
unsigned char nupvals; // (u) number of upvalues
unsigned char nparams; // (a) number of parameters
char isvararg; // (a)
void* userdata; // only valid in luau_callhook
const char* name; // (n)
const char* what; // (s) `Lua', `C', `main', `tail'
const char* source; // (s)
const char* short_src; // (s)
int linedefined; // (s)
int currentline; // (l)
unsigned char nupvals; // (u) number of upvalues
unsigned char nparams; // (a) number of parameters
char isvararg; // (a)
void* userdata; // only valid in luau_callhook
char ssbuf[LUA_IDSIZE];
};

View File

@ -133,4 +133,4 @@
#define LUA_VECTOR_SIZE 3 // must be 3 or 4
#define LUA_EXTRA_SIZE LUA_VECTOR_SIZE - 2
#define LUA_EXTRA_SIZE (LUA_VECTOR_SIZE - 2)
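The added parentheses matter because the macro is substituted textually into arithmetic expressions. A small illustration (the surrounding expression is hypothetical):
// without parentheses: sizeof(TValue) * LUA_EXTRA_SIZE expands to sizeof(TValue) * LUA_VECTOR_SIZE - 2
// with parentheses:    sizeof(TValue) * LUA_EXTRA_SIZE expands to sizeof(TValue) * (LUA_VECTOR_SIZE - 2)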

View File

@ -263,11 +263,14 @@ static int luauF_log(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
static int luauF_max(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1 && ttisnumber(arg0))
if (nparams >= 2 && nresults <= 1 && ttisnumber(arg0) && ttisnumber(args))
{
double r = nvalue(arg0);
double a1 = nvalue(arg0);
double a2 = nvalue(args);
for (int i = 2; i <= nparams; ++i)
double r = (a2 > a1) ? a2 : a1;
for (int i = 3; i <= nparams; ++i)
{
if (!ttisnumber(args + (i - 2)))
return -1;
@ -286,11 +289,14 @@ static int luauF_max(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
static int luauF_min(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1 && ttisnumber(arg0))
if (nparams >= 2 && nresults <= 1 && ttisnumber(arg0) && ttisnumber(args))
{
double r = nvalue(arg0);
double a1 = nvalue(arg0);
double a2 = nvalue(args);
for (int i = 2; i <= nparams; ++i)
double r = (a2 < a1) ? a2 : a1;
for (int i = 3; i <= nparams; ++i)
{
if (!ttisnumber(args + (i - 2)))
return -1;
@ -439,22 +445,18 @@ static int luauF_arshift(lua_State* L, StkId res, TValue* arg0, int nresults, St
static int luauF_band(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1)
if (nparams >= 2 && nresults <= 1 && ttisnumber(arg0) && ttisnumber(args))
{
uint32_t r = ~0u;
double a1 = nvalue(arg0);
double a2 = nvalue(args);
if (!ttisnumber(arg0))
return -1;
unsigned u1, u2;
luai_num2unsigned(u1, a1);
luai_num2unsigned(u2, a2);
{
double a1 = nvalue(arg0);
unsigned u;
luai_num2unsigned(u, a1);
uint32_t r = u1 & u2;
r &= u;
}
for (int i = 2; i <= nparams; ++i)
for (int i = 3; i <= nparams; ++i)
{
if (!ttisnumber(args + (i - 2)))
return -1;
@ -492,22 +494,18 @@ static int luauF_bnot(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
static int luauF_bor(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1)
if (nparams >= 2 && nresults <= 1 && ttisnumber(arg0) && ttisnumber(args))
{
uint32_t r = 0;
double a1 = nvalue(arg0);
double a2 = nvalue(args);
if (!ttisnumber(arg0))
return -1;
unsigned u1, u2;
luai_num2unsigned(u1, a1);
luai_num2unsigned(u2, a2);
{
double a1 = nvalue(arg0);
unsigned u;
luai_num2unsigned(u, a1);
uint32_t r = u1 | u2;
r |= u;
}
for (int i = 2; i <= nparams; ++i)
for (int i = 3; i <= nparams; ++i)
{
if (!ttisnumber(args + (i - 2)))
return -1;
@ -528,22 +526,18 @@ static int luauF_bor(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
static int luauF_bxor(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1)
if (nparams >= 2 && nresults <= 1 && ttisnumber(arg0) && ttisnumber(args))
{
uint32_t r = 0;
double a1 = nvalue(arg0);
double a2 = nvalue(args);
if (!ttisnumber(arg0))
return -1;
unsigned u1, u2;
luai_num2unsigned(u1, a1);
luai_num2unsigned(u2, a2);
{
double a1 = nvalue(arg0);
unsigned u;
luai_num2unsigned(u, a1);
uint32_t r = u1 ^ u2;
r ^= u;
}
for (int i = 2; i <= nparams; ++i)
for (int i = 3; i <= nparams; ++i)
{
if (!ttisnumber(args + (i - 2)))
return -1;
@ -564,22 +558,18 @@ static int luauF_bxor(lua_State* L, StkId res, TValue* arg0, int nresults, StkId
static int luauF_btest(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
if (nparams >= 1 && nresults <= 1)
if (nparams >= 2 && nresults <= 1 && ttisnumber(arg0) && ttisnumber(args))
{
uint32_t r = ~0u;
double a1 = nvalue(arg0);
double a2 = nvalue(args);
if (!ttisnumber(arg0))
return -1;
unsigned u1, u2;
luai_num2unsigned(u1, a1);
luai_num2unsigned(u2, a2);
{
double a1 = nvalue(arg0);
unsigned u;
luai_num2unsigned(u, a1);
uint32_t r = u1 & u2;
r &= u;
}
for (int i = 2; i <= nparams; ++i)
for (int i = 3; i <= nparams; ++i)
{
if (!ttisnumber(args + (i - 2)))
return -1;
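The min/max and bit32 fast paths above all follow the same new shape: require at least two numeric arguments, fold the first pair immediately, then continue from the third argument and bail out to the generic call path if anything is not a number. Reassembled from the interleaved hunks, the new luauF_max reads roughly as follows; the loop tail and the result store are reconstructed from the pre-existing code and may differ in small details:
static int luauF_max(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams)
{
    if (nparams >= 2 && nresults <= 1 && ttisnumber(arg0) && ttisnumber(args))
    {
        double a1 = nvalue(arg0);
        double a2 = nvalue(args);

        // fold the first two arguments up front, then scan any remaining ones
        double r = (a2 > a1) ? a2 : a1;

        for (int i = 3; i <= nparams; ++i)
        {
            if (!ttisnumber(args + (i - 2)))
                return -1;

            double a = nvalue(args + (i - 2));
            r = (a > r) ? a : r;
        }

        setnvalue(res, r);
        return 1;
    }

    return -1;
}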

View File

@ -108,7 +108,7 @@ UpVal* luaF_findupval(lua_State* L, StkId level)
void luaF_freeupval(lua_State* L, UpVal* uv, lua_Page* page)
{
luaM_freegco(L, uv, sizeof(UpVal), uv->memcat, page); // free upvalue
luaM_freegco(L, uv, sizeof(UpVal), uv->memcat, page); // free upvalue
}
void luaF_close(lua_State* L, StkId level)

View File

@ -13,8 +13,6 @@
#include <stdio.h>
#include <stdlib.h>
LUAU_FASTFLAG(LuauFasterGetInfo)
const TValue luaO_nilobject_ = {{NULL}, {0}, LUA_TNIL};

View File

@ -290,6 +290,7 @@ typedef struct Proto
int sizelineinfo;
int linegaplog2;
int linedefined;
int bytecodeid;
uint8_t nups; // number of upvalues

View File

@ -100,6 +100,12 @@ static void close_state(lua_State* L)
LUAU_ASSERT(g->memcatbytes[0] == sizeof(LG));
for (int i = 1; i < LUA_MEMORY_CATEGORIES; i++)
LUAU_ASSERT(g->memcatbytes[i] == 0);
#if LUA_CUSTOM_EXECUTION
if (L->global->ecb.close)
L->global->ecb.close(L);
#endif
(*g->frealloc)(g->ud, L, sizeof(LG), 0);
}

View File

@ -146,18 +146,15 @@ struct GCMetrics
};
#endif
#if LUA_CUSTOM_EXECUTION
// Callbacks that can be used to redirect code execution from Luau bytecode VM to a custom implementation (AoT/JiT/sandboxing/...)
typedef struct lua_ExecutionCallbacks
struct lua_ExecutionCallbacks
{
void* context;
void (*close)(lua_State* L); // called when global VM state is closed
void (*destroy)(lua_State* L, Proto* proto); // called when function is destroyed
int (*enter)(lua_State* L, Proto* proto); // called when function is about to start/resume (when execdata is present), return 0 to exit VM
void (*setbreakpoint)(lua_State* L, Proto* proto, int line); // called when a breakpoint is set in a function
} lua_ExecutionCallbacks;
#endif
};
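With the #if removed, lua_ExecutionCallbacks is always present in the global state, and its close hook is invoked from close_state() (shown earlier in this diff) when LUA_CUSTOM_EXECUTION is enabled. A hypothetical sketch of how an execution engine might register itself; all function names other than the struct fields are invented:
// illustrative only: wiring a custom execution engine into a lua_State
static void onClose(lua_State* L) {}                      // release generated code owned by the engine
static void onDestroyProto(lua_State* L, Proto* proto) {} // drop per-function native data
static int onEnter(lua_State* L, Proto* proto)
{
    return 1; // returning 0 tells the VM to run the bytecode interpreter instead
}

void attachCustomExecution(lua_State* L, void* engineContext)
{
    lua_ExecutionCallbacks& ecb = L->global->ecb; // the same field close_state() checks
    ecb.context = engineContext;
    ecb.close = onClose;
    ecb.destroy = onDestroyProto;
    ecb.enter = onEnter;
    ecb.setbreakpoint = nullptr; // optional hook for debugger integration
}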
/*
** `global state', shared by all threads of this state

View File

@ -16,6 +16,8 @@
#include <string.h>
LUAU_FASTFLAGVARIABLE(LuauNoTopRestoreInFastCall, false)
// Disable c99-designator to avoid the warning in CGOTO dispatch table
#ifdef __clang__
#if __has_warning("-Wc99-designator")
@ -2537,6 +2539,8 @@ reentry:
if (n >= 0)
{
// when nresults != MULTRET, L->top might be pointing to the middle of stack frame if nparams is equal to MULTRET
// instead of restoring L->top to L->ci->top if nparams is MULTRET, we do it unconditionally to skip an extra check
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
pc += skip + 1; // skip instructions that compute function as well as CALL
@ -2613,7 +2617,15 @@ reentry:
if (n >= 0)
{
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
if (FFlag::LuauNoTopRestoreInFastCall)
{
if (nresults == LUA_MULTRET)
L->top = ra + n;
}
else
{
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
}
pc += skip + 1; // skip instructions that compute function as well as CALL
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
@ -2661,7 +2673,15 @@ reentry:
if (n >= 0)
{
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
if (FFlag::LuauNoTopRestoreInFastCall)
{
if (nresults == LUA_MULTRET)
L->top = ra + n;
}
else
{
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
}
pc += skip + 1; // skip instructions that compute function as well as CALL
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
@ -2709,7 +2729,15 @@ reentry:
if (n >= 0)
{
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
if (FFlag::LuauNoTopRestoreInFastCall)
{
if (nresults == LUA_MULTRET)
L->top = ra + n;
}
else
{
L->top = (nresults == LUA_MULTRET) ? ra + n : L->ci->top;
}
pc += skip + 1; // skip instructions that compute function as well as CALL
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));

View File

@ -192,6 +192,7 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
{
Proto* p = luaF_newproto(L);
p->source = source;
p->bytecodeid = int(i);
p->maxstacksize = read<uint8_t>(data, size, offset);
p->numparams = read<uint8_t>(data, size, offset);

View File

@ -5,6 +5,7 @@ function test()
local band = bit32.band
local bnot = bit32.bnot
local bxor = bit32.bxor
local bor = bit32.bor
local rrotate = bit32.rrotate
local rshift = bit32.rshift
@ -79,14 +80,14 @@ function test()
h, g, f, e, d, c, b, a = g, f, e, d + t1, c, b, a, t1 + t2
end
hash[1] = band(hash[1] + a)
hash[2] = band(hash[2] + b)
hash[3] = band(hash[3] + c)
hash[4] = band(hash[4] + d)
hash[5] = band(hash[5] + e)
hash[6] = band(hash[6] + f)
hash[7] = band(hash[7] + g)
hash[8] = band(hash[8] + h)
hash[1] = bor(hash[1] + a, 0)
hash[2] = bor(hash[2] + b, 0)
hash[3] = bor(hash[3] + c, 0)
hash[4] = bor(hash[4] + d, 0)
hash[5] = bor(hash[5] + e, 0)
hash[6] = bor(hash[6] + f, 0)
hash[7] = bor(hash[7] + g, 0)
hash[8] = bor(hash[8] + h, 0)
end
local function sha256(msg)
@ -139,4 +140,4 @@ function test()
return ts1 - ts0
end
bench.runCode(test, "sha256")
bench.runCode(test, "sha256")

View File

@ -177,6 +177,9 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "BaseUnaryInstructionForms")
SINGLE_COMPARE(imul(r9), 0x49, 0xf7, 0xe9);
SINGLE_COMPARE(neg(r9), 0x49, 0xf7, 0xd9);
SINGLE_COMPARE(not_(r12), 0x49, 0xf7, 0xd4);
SINGLE_COMPARE(inc(r12), 0x49, 0xff, 0xc4);
SINGLE_COMPARE(dec(ecx), 0xff, 0xc9);
SINGLE_COMPARE(dec(byte[rdx]), 0xfe, 0x0a);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfMov")
@ -365,7 +368,6 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXUnaryMergeInstructionForms")
SINGLE_COMPARE(vsqrtss(xmm8, xmm10, dword[r9]), 0xc4, 0x41, 0xaa, 0x51, 0x01);
// Coverage for other instructions that follow the same pattern
SINGLE_COMPARE(vcomisd(xmm8, xmm10), 0xc4, 0x41, 0xf9, 0x2f, 0xc2);
SINGLE_COMPARE(vucomisd(xmm1, xmm4), 0xc4, 0xe1, 0xf9, 0x2e, 0xcc);
}
@ -405,9 +407,10 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXConversionInstructionForms")
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXTernaryInstructionForms")
{
SINGLE_COMPARE(vroundsd(xmm7, xmm12, xmm3, 9), 0xc4, 0xe3, 0x99, 0x0b, 0xfb, 0x09);
SINGLE_COMPARE(vroundsd(xmm8, xmm13, xmmword[r13 + rdx], 9), 0xc4, 0x43, 0x91, 0x0b, 0x44, 0x15, 0x00, 0x09);
SINGLE_COMPARE(vroundsd(xmm9, xmm14, xmmword[rcx + r10], 1), 0xc4, 0x23, 0x89, 0x0b, 0x0c, 0x11, 0x01);
SINGLE_COMPARE(vroundsd(xmm7, xmm12, xmm3, RoundingModeX64::RoundToNegativeInfinity), 0xc4, 0xe3, 0x99, 0x0b, 0xfb, 0x09);
SINGLE_COMPARE(
vroundsd(xmm8, xmm13, xmmword[r13 + rdx], RoundingModeX64::RoundToPositiveInfinity), 0xc4, 0x43, 0x91, 0x0b, 0x44, 0x15, 0x00, 0x0a);
SINGLE_COMPARE(vroundsd(xmm9, xmm14, xmmword[rcx + r10], RoundingModeX64::RoundToZero), 0xc4, 0x23, 0x89, 0x0b, 0x0c, 0x11, 0x0b);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "MiscInstructions")
@ -441,7 +444,7 @@ TEST_CASE("LogTest")
build.movsx(rsi, word[r12]);
build.imul(rcx, rdx);
build.imul(rcx, rdx, 8);
build.vroundsd(xmm1, xmm2, xmm3, 5);
build.vroundsd(xmm1, xmm2, xmm3, RoundingModeX64::RoundToNearestEven);
build.pop(r12);
build.ret();
build.int3();
@ -469,7 +472,7 @@ TEST_CASE("LogTest")
movsx rsi,word ptr [r12]
imul rcx,rdx
imul rcx,rdx,8
vroundsd xmm1,xmm2,xmm3,5
vroundsd xmm1,xmm2,xmm3,8
pop r12
ret
int3

View File

@ -42,4 +42,4 @@ bool FindNthOccurenceOf::visit(AstTypePack* t)
return checkIt(t);
}
}
} // namespace Luau

View File

@ -80,4 +80,4 @@ T* query(AstNode* node, const std::vector<Nth>& nths = {nth<T>(N)})
return node ? node->as<T>() : nullptr;
}
}
} // namespace Luau

View File

@ -2873,6 +2873,43 @@ RETURN R0 0
)");
}
TEST_CASE("SourceRemarks")
{
const char* source = R"(
local a, b = ...
local function foo(x)
return(math.abs(x))
end
return foo(a) + foo(assert(b))
)";
Luau::BytecodeBuilder bcb;
bcb.setDumpFlags(Luau::BytecodeBuilder::Dump_Source | Luau::BytecodeBuilder::Dump_Remarks);
bcb.setDumpSource(source);
Luau::CompileOptions options;
options.optimizationLevel = 2;
Luau::compileOrThrow(bcb, source, options);
std::string remarks = bcb.dumpSourceRemarks();
CHECK_EQ(remarks, R"(
local a, b = ...
local function foo(x)
-- remark: builtin math.abs/1
return(math.abs(x))
end
-- remark: builtin assert/1
-- remark: inlining succeeded (cost 2, profit 2.50x, depth 0)
return foo(a) + foo(assert(b))
)");
}
TEST_CASE("AssignmentConflict")
{
// assignments are left to right
@ -5042,7 +5079,6 @@ GETIMPORT R2 2
CALL R2 0 0
RETURN R1 1
)");
}
TEST_CASE("InlineNestedLoops")

@ -8,6 +8,7 @@
#include "Luau/TypeInfer.h"
#include "Luau/StringUtils.h"
#include "Luau/BytecodeBuilder.h"
#include "Luau/CodeGen.h"
#include "doctest.h"
#include "ScopedFlags.h"
@ -17,6 +18,7 @@
#include <math.h>
extern bool verbose;
extern bool codegen;
extern int optimizationLevel;
static lua_CompileOptions defaultOptions()
@ -137,7 +139,7 @@ int lua_silence(lua_State* L)
using StateRef = std::unique_ptr<lua_State, void (*)(lua_State*)>;
static StateRef runConformance(const char* name, void (*setup)(lua_State* L) = nullptr, void (*yield)(lua_State* L) = nullptr,
lua_State* initialLuaState = nullptr, lua_CompileOptions* options = nullptr)
lua_State* initialLuaState = nullptr, lua_CompileOptions* options = nullptr, bool skipCodegen = false)
{
std::string path = __FILE__;
path.erase(path.find_last_of("\\/"));
@ -156,6 +158,9 @@ static StateRef runConformance(const char* name, void (*setup)(lua_State* L) = n
StateRef globalState(initialLuaState, lua_close);
lua_State* L = globalState.get();
if (codegen && !skipCodegen && Luau::CodeGen::isSupported())
Luau::CodeGen::create(L);
luaL_openlibs(L);
// Register a few global functions for conformance tests
@ -207,6 +212,9 @@ static StateRef runConformance(const char* name, void (*setup)(lua_State* L) = n
int result = luau_load(L, chunkname.c_str(), bytecode, bytecodeSize, 0);
free(bytecode);
if (result == 0 && codegen && !skipCodegen && Luau::CodeGen::isSupported())
Luau::CodeGen::compile(L, -1);
int status = (result == 0) ? lua_resume(L, nullptr, 0) : LUA_ERRSYNTAX;
while (yield && (status == LUA_YIELD || status == LUA_BREAK))
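The two guarded blocks above are the entire opt-in path for native code in this runner: attach codegen state to the VM right after it is created, then compile each chunk once luau_load succeeds. A condensed sketch of that flow, using only the calls that appear above; the bytecode production and error handling are assumed:

#include "lua.h"
#include "lualib.h"
#include "Luau/CodeGen.h"

// Sketch: enable native codegen for a new VM and one loaded chunk.
// Assumes `bytecode`/`bytecodeSize` were produced by the compiler elsewhere.
lua_State* L = luaL_newstate();
if (Luau::CodeGen::isSupported())
    Luau::CodeGen::create(L); // attach native-codegen state to this VM

luaL_openlibs(L);

int result = luau_load(L, "=Chunk", bytecode, bytecodeSize, 0);
if (result == 0 && Luau::CodeGen::isSupported())
    Luau::CodeGen::compile(L, -1); // compile the function left on top of the stack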
@ -563,19 +571,19 @@ TEST_CASE("Debugger")
};
// add breakpoint() function
lua_pushcfunction(
lua_pushcclosurek(
L,
[](lua_State* L) -> int {
int line = luaL_checkinteger(L, 1);
bool enabled = luaL_optboolean(L, 2, true);
lua_Debug ar = {};
lua_getinfo(L, 1, "f", &ar);
lua_getinfo(L, lua_stackdepth(L) - 1, "f", &ar);
lua_breakpoint(L, -1, line, enabled);
return 0;
},
"breakpoint");
"breakpoint", 0, nullptr);
lua_setglobal(L, "breakpoint");
},
[](lua_State* L) {
@ -656,9 +664,9 @@ TEST_CASE("Debugger")
interruptedthread = nullptr;
}
},
nullptr, &copts);
nullptr, &copts, /* skipCodegen */ true); // Native code doesn't support debugging yet
CHECK(breakhits == 10); // 2 hits per breakpoint
CHECK(breakhits == 12); // 2 hits per breakpoint
}
TEST_CASE("SameHash")
@ -806,17 +814,17 @@ TEST_CASE("ApiIter")
{
sum1 += lua_tonumber(L, -2); // key
sum1 += lua_tonumber(L, -1); // value
lua_pop(L, 1); // pop value, key is used by lua_next
lua_pop(L, 1); // pop value, key is used by lua_next
}
CHECK(sum1 == 580);
// Luau iteration interface: lua_rawiter (faster and preferable to lua_next)
double sum2 = 0;
for (int index = 0; index = lua_rawiter(L, -1, index), index >= 0; )
for (int index = 0; index = lua_rawiter(L, -1, index), index >= 0;)
{
sum2 += lua_tonumber(L, -2); // key
sum2 += lua_tonumber(L, -1); // value
lua_pop(L, 2); // pop both key and value
lua_pop(L, 2); // pop both key and value
}
CHECK(sum2 == 580);
@ -1188,7 +1196,13 @@ TEST_CASE("Interrupt")
5,
5,
6,
11,
18,
13,
13,
13,
13,
16,
20,
};
static int index;
@ -1283,6 +1297,21 @@ TEST_CASE("UserdataApi")
*(int*)ud3 = 43;
*(char*)ud4 = 3;
// user data with named metatable
luaL_newmetatable(L, "udata1");
luaL_newmetatable(L, "udata2");
void* ud5 = lua_newuserdata(L, 0);
lua_getfield(L, LUA_REGISTRYINDEX, "udata1");
lua_setmetatable(L, -2);
void* ud6 = lua_newuserdata(L, 0);
lua_getfield(L, LUA_REGISTRYINDEX, "udata2");
lua_setmetatable(L, -2);
CHECK(luaL_checkudata(L, -2, "udata1") == ud5);
CHECK(luaL_checkudata(L, -1, "udata2") == ud6);
globalState.reset();
CHECK(dtorhits == 42);
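The new checks exercise the standard named-metatable pattern: luaL_newmetatable registers a metatable under a registry key, each userdata gets that metatable attached, and luaL_checkudata later rejects values that do not carry it. A small sketch of how an embedder typically uses the same calls; the Point type and "Point.meta" key are illustrative, not part of this commit:

// Sketch: typed userdata via a named metatable, mirroring the API calls above.
static int point_new(lua_State* L)
{
    double* p = static_cast<double*>(lua_newuserdata(L, 2 * sizeof(double)));
    p[0] = luaL_checknumber(L, 1);
    p[1] = luaL_checknumber(L, 2);
    lua_getfield(L, LUA_REGISTRYINDEX, "Point.meta"); // registered earlier via luaL_newmetatable
    lua_setmetatable(L, -2);
    return 1;
}

static int point_x(lua_State* L)
{
    // Errors unless the first argument carries the "Point.meta" metatable.
    double* p = static_cast<double*>(luaL_checkudata(L, 1, "Point.meta"));
    lua_pushnumber(L, p[0]);
    return 1;
}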
@ -1494,4 +1523,9 @@ TEST_CASE("Userdata")
});
}
TEST_CASE("SafeEnv")
{
runConformance("safeenv.lua");
}
TEST_SUITE_END();

@ -14,4 +14,4 @@ ConstraintGraphBuilderFixture::ConstraintGraphBuilderFixture()
BlockedTypePack::nextIndex = 0;
}
}
} // namespace Luau

@ -24,4 +24,4 @@ struct ConstraintGraphBuilderFixture : Fixture
ConstraintGraphBuilderFixture();
};
}
} // namespace Luau

@ -92,7 +92,8 @@ std::optional<std::string> TestFileResolver::getEnvironmentForModule(const Modul
Fixture::Fixture(bool freeze, bool prepareAutocomplete)
: sff_DebugLuauFreezeArena("DebugLuauFreezeArena", freeze)
, frontend(&fileResolver, &configResolver, {/* retainFullTypeGraphs= */ true, /* forAutocomplete */ false, /* randomConstraintResolutionSeed */ randomSeed})
, frontend(&fileResolver, &configResolver,
{/* retainFullTypeGraphs= */ true, /* forAutocomplete */ false, /* randomConstraintResolutionSeed */ randomSeed})
, typeChecker(frontend.typeChecker)
, singletonTypes(frontend.singletonTypes)
{

@ -1029,7 +1029,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "reexport_cyclic_type")
{
ScopedFastFlag sff[] = {
{"LuauForceExportSurfacesToBeNormal", true},
{"LuauLowerBoundsCalculation", true},
};
fileResolver.source["Module/A"] = R"(
@ -1065,7 +1064,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "reexport_type_alias")
{
ScopedFastFlag sff[] = {
{"LuauForceExportSurfacesToBeNormal", true},
{"LuauLowerBoundsCalculation", true},
};
fileResolver.source["Module/A"] = R"(

@ -45,8 +45,6 @@ TEST_CASE_FIXTURE(Fixture, "DeprecatedGlobal")
TEST_CASE_FIXTURE(Fixture, "DeprecatedGlobalNoReplacement")
{
ScopedFastFlag sff{"LuauLintFixDeprecationMessage", true};
// Normally this would be defined externally, so hack it in for testing
const char* deprecationReplacementString = "";
addGlobalBinding(frontend, "Version", Binding{typeChecker.anyType, {}, true, deprecationReplacementString});

@ -11,7 +11,6 @@
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
TEST_SUITE_BEGIN("ModuleTests");
@ -107,11 +106,8 @@ TEST_CASE_FIXTURE(Fixture, "deepClone_cyclic_table")
REQUIRE(methodReturnType);
CHECK_EQ(methodReturnType, counterCopy);
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ(3, dest.typePacks.size()); // function args, its return type, and the hidden any... pack
else
CHECK_EQ(2, dest.typePacks.size()); // one for the function args, and another for its return type
CHECK_EQ(2, dest.typeVars.size()); // One table and one function
CHECK_EQ(2, dest.typePacks.size()); // one for the function args, and another for its return type
CHECK_EQ(2, dest.typeVars.size()); // One table and one function
}
TEST_CASE_FIXTURE(BuiltinsFixture, "builtin_types_point_into_globalTypes_arena")

@ -12,8 +12,6 @@ using namespace Luau;
struct NormalizeFixture : Fixture
{
ScopedFastFlag sff1{"LuauLowerBoundsCalculation", true};
bool isSubtype(TypeId a, TypeId b)
{
return ::Luau::isSubtype(a, b, NotNull{getMainModule()->getModuleScope().get()}, singletonTypes, ice);
@ -116,159 +114,6 @@ TEST_CASE_FIXTURE(NormalizeFixture, "functions_and_any")
CHECK(!isSubtype(a, b));
}
TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_functions_of_different_arities")
{
check(R"(
type A = (any) -> ()
type B = (any, any) -> ()
type T = A & B
local a: A
local b: B
local t: T
)");
TypeId a = requireType("a");
TypeId b = requireType("b");
CHECK(!isSubtype(a, b)); // !!
CHECK(!isSubtype(b, a));
CHECK("((any) -> ()) & ((any, any) -> ())" == toString(requireType("t")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "functions_with_mismatching_arity")
{
check(R"(
local a: (number) -> ()
local b: () -> ()
local c: () -> number
)");
TypeId a = requireType("a");
TypeId b = requireType("b");
TypeId c = requireType("c");
CHECK(!isSubtype(b, a));
CHECK(!isSubtype(c, a));
CHECK(!isSubtype(a, b));
CHECK(!isSubtype(c, b));
CHECK(!isSubtype(a, c));
CHECK(!isSubtype(b, c));
}
TEST_CASE_FIXTURE(NormalizeFixture, "functions_with_mismatching_arity_but_optional_parameters")
{
/*
* (T0..TN) <: (T0..TN, A?)
* (T0..TN) <: (T0..TN, any)
* (T0..TN, A?) </: (T0..TN) We don't technically need to spell this out, but it's quite important.
* T <: T
* if A <: B and B <: C then A <: C
* T -> R <: U -> S if U <: T and R <: S
* A | B <: T if A <: T and B <: T
* T <: A | B if T <: A or T <: B
*/
check(R"(
local a: (number?) -> ()
local b: (number) -> ()
local c: (number, number?) -> ()
)");
TypeId a = requireType("a");
TypeId b = requireType("b");
TypeId c = requireType("c");
/*
* (number) -> () </: (number?) -> ()
* because number? </: number (because number <: number, but nil </: number)
*/
CHECK(!isSubtype(b, a));
/*
* (number, number?) </: (number?) -> ()
* because number? </: number (as above)
*/
CHECK(!isSubtype(c, a));
/*
* (number?) -> () <: (number) -> ()
* because number <: number? (because number <: number)
*/
CHECK(isSubtype(a, b));
/*
* (number, number?) -> () <: (number) -> (number)
* The packs have inequal lengths, but (number) <: (number, number?)
* and number <: number
*/
CHECK(!isSubtype(c, b));
/*
* (number?) -> () </: (number, number?) -> ()
* because (number, number?) </: (number)
*/
CHECK(!isSubtype(a, c));
/*
* (number) -> () </: (number, number?) -> ()
* because (number, number?) </: (number)
*/
CHECK(!isSubtype(b, c));
}
TEST_CASE_FIXTURE(NormalizeFixture, "functions_with_mismatching_arity_but_any_is_an_optional_param")
{
check(R"(
local a: (number?) -> ()
local b: (number) -> ()
local c: (number, any) -> ()
)");
TypeId a = requireType("a");
TypeId b = requireType("b");
TypeId c = requireType("c");
/*
* (number) -> () </: (number?) -> ()
* because number? </: number (because number <: number, but nil </: number)
*/
CHECK(!isSubtype(b, a));
/*
* (number, any) </: (number?) -> ()
* because number? </: number (as above)
*/
CHECK(!isSubtype(c, a));
/*
* (number?) -> () <: (number) -> ()
* because number <: number? (because number <: number)
*/
CHECK(isSubtype(a, b));
/*
* (number, any) -> () </: (number) -> (number)
* The packs have inequal lengths
*/
CHECK(!isSubtype(c, b));
/*
* (number?) -> () </: (number, any) -> ()
* The packs have inequal lengths
*/
CHECK(!isSubtype(a, c));
/*
* (number) -> () </: (number, any) -> ()
* The packs have inequal lengths
*/
CHECK(!isSubtype(b, c));
}
TEST_CASE_FIXTURE(NormalizeFixture, "variadic_functions_with_no_head")
{
check(R"(
@ -356,7 +201,7 @@ TEST_CASE_FIXTURE(NormalizeFixture, "table_with_any_prop")
TEST_CASE_FIXTURE(NormalizeFixture, "intersection")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -398,16 +243,6 @@ TEST_CASE_FIXTURE(NormalizeFixture, "union_and_intersection")
CHECK(isSubtype(a, b));
}
TEST_CASE_FIXTURE(NormalizeFixture, "table_with_table_prop")
{
check(R"(
type T = {x: {y: number}} & {x: {y: string}}
local a: T
)");
CHECK_EQ("{| x: {| y: number & string |} |}", toString(requireType("a")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "tables")
{
check(R"(
@ -552,50 +387,10 @@ TEST_CASE_FIXTURE(NormalizeFixture, "metatable" * doctest::expected_failures{1})
}
#endif
TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_tables")
{
check(R"(
type T = {x: number} & ({x: number} & {y: string?})
local t: T
)");
CHECK("{| x: number, y: string? |}" == toString(requireType("t")));
}
TEST_SUITE_END();
TEST_SUITE_BEGIN("Normalize");
TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_disjoint_tables")
{
check(R"(
type T = {a: number} & {b: number}
local t: T
)");
CHECK_EQ("{| a: number, b: number |}", toString(requireType("t")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_overlapping_tables")
{
check(R"(
type T = {a: number, b: string} & {b: number, c: string}
local t: T
)");
CHECK_EQ("{| a: number, b: number & string, c: string |}", toString(requireType("t")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_confluent_overlapping_tables")
{
check(R"(
type T = {a: number, b: string} & {b: string, c: string}
local t: T
)");
CHECK_EQ("{| a: number, b: string, c: string |}", toString(requireType("t")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "union_with_overlapping_field_that_has_a_subtype_relationship")
{
check(R"(
@ -616,54 +411,6 @@ TEST_CASE_FIXTURE(NormalizeFixture, "union_with_overlapping_field_that_has_a_sub
CHECK_EQ("{| x: number? |}", toString(tType, {true}));
}
TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_functions")
{
check(R"(
type T = ((any) -> string) & ((number) -> string)
local t: T
)");
CHECK_EQ("(any) -> string", toString(requireType("t")));
}
TEST_CASE_FIXTURE(Fixture, "normalize_module_return_type")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
};
check(R"(
--!nonstrict
if Math.random() then
return function(initialState, handlers)
return function(state, action)
return state
end
end
else
return function(initialState, handlers)
return function(state, action)
return state
end
end
end
)");
CHECK_EQ("(any, any) -> (...any)", toString(getMainModule()->getModuleScope()->returnType));
}
TEST_CASE_FIXTURE(Fixture, "return_type_is_not_a_constrained_intersection")
{
check(R"(
function foo(x:number, y:number)
return x + y
end
)");
CHECK_EQ("(number, number) -> number", toString(requireType("foo")));
}
TEST_CASE_FIXTURE(Fixture, "higher_order_function")
{
check(R"(
@ -689,52 +436,6 @@ TEST_CASE_FIXTURE(Fixture, "higher_order_function_with_annotation")
CHECK_EQ("<a, b>((a) -> b, a) -> b", toString(requireType("apply")));
}
// Unfortunately, getting this right in the general case is difficult.
TEST_CASE_FIXTURE(Fixture, "cyclic_table_is_not_marked_normal")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
check(R"(
type Fiber = {
return_: Fiber?
}
local f: Fiber
)");
TypeId t = requireType("f");
CHECK(!t->normal);
}
TEST_CASE_FIXTURE(Fixture, "variadic_tail_is_marked_normal")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
type Weirdo = (...{x: number}) -> ()
local w: Weirdo
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId t = requireType("w");
auto ftv = get<FunctionTypeVar>(t);
REQUIRE(ftv);
auto [argHead, argTail] = flatten(ftv->argTypes);
CHECK(argHead.empty());
REQUIRE(argTail.has_value());
auto vtp = get<VariadicTypePack>(*argTail);
REQUIRE(vtp);
CHECK(vtp->ty->normal);
}
TEST_CASE_FIXTURE(Fixture, "cyclic_table_normalizes_sensibly")
{
CheckResult result = check(R"(
@ -750,343 +451,6 @@ TEST_CASE_FIXTURE(Fixture, "cyclic_table_normalizes_sensibly")
CHECK_EQ("t1 where t1 = { get: () -> t1 }", toString(ty, {true}));
}
TEST_CASE_FIXTURE(Fixture, "cyclic_union")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
type T = {T?}?
local a: T
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("t1? where t1 = {t1?}" == toString(requireType("a")));
}
TEST_CASE_FIXTURE(Fixture, "cyclic_intersection")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
type T = {T & {}}
local a: T
)");
LUAU_REQUIRE_NO_ERRORS(result);
// FIXME: We are not properly normalizing this type, but we are at least not improperly discarding information
CHECK("t1 where t1 = {{t1 & {| |}}}" == toString(requireType("a"), {true}));
}
TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_indexers")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
type A = {number}
type B = {string}
type C = A & B
local a: C
)");
LUAU_REQUIRE_NO_ERRORS(result);
// FIXME: We are not properly normalizing this type, but we are at least not improperly discarding information
CHECK("{number & string}" == toString(requireType("a"), {true}));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "union_of_distinct_free_types")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
function fussy(a, b)
if math.random() > 0.5 then
return a
else
return b
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("<a, b>(a, b) -> a | b" == toString(requireType("fussy")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "constrained_intersection_of_intersections")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
local f : (() -> number) | ((number) -> number)
local g : (() -> number) | ((string) -> number)
function h()
if math.random() then
return f
else
return g
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId h = requireType("h");
CHECK("() -> (() -> number) | ((number) -> number) | ((string) -> number)" == toString(h));
}
TEST_CASE_FIXTURE(Fixture, "intersection_inside_a_table_inside_another_intersection")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
type X = {}
type Y = {y: number}
type Z = {z: string}
type W = {w: boolean}
type T = {x: Y & X} & {x:Z & W}
local x: X
local y: Y
local z: Z
local w: W
local t: T
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("{| |}" == toString(requireType("x"), {true}));
CHECK("{| y: number |}" == toString(requireType("y"), {true}));
CHECK("{| z: string |}" == toString(requireType("z"), {true}));
CHECK("{| w: boolean |}" == toString(requireType("w"), {true}));
CHECK("{| x: {| w: boolean, y: number, z: string |} |}" == toString(requireType("t"), {true}));
}
TEST_CASE_FIXTURE(Fixture, "intersection_inside_a_table_inside_another_intersection_2")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
// We use a function and inferred parameter types to prevent intermediate normalizations from being performed.
// This exposes a bug where the type of y is mutated.
CheckResult result = check(R"(
function strange(w, x, y, z)
y.y = 5
z.z = "five"
w.w = true
type Z = {x: typeof(x) & typeof(y)} & {x: typeof(w) & typeof(z)}
return ((nil :: any) :: Z)
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId t = requireType("strange");
auto ftv = get<FunctionTypeVar>(t);
REQUIRE(ftv != nullptr);
std::vector<TypeId> args = flatten(ftv->argTypes).first;
REQUIRE(4 == args.size());
CHECK("{+ w: boolean +}" == toString(args[0]));
CHECK("a" == toString(args[1]));
CHECK("{+ y: number +}" == toString(args[2]));
CHECK("{+ z: string +}" == toString(args[3]));
std::vector<TypeId> ret = flatten(ftv->retTypes).first;
REQUIRE(1 == ret.size());
CHECK("{| x: a & {+ w: boolean, y: number, z: string +} |}" == toString(ret[0]));
}
TEST_CASE_FIXTURE(Fixture, "intersection_inside_a_table_inside_another_intersection_3")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
// We use a function and inferred parameter types to prevent intermediate normalizations from being performed.
// This exposes a bug where the type of y is mutated.
CheckResult result = check(R"(
function strange(x, y, z)
x.x = true
y.y = y
z.z = "five"
type Z = {x: typeof(y)} & {x: typeof(x) & typeof(z)}
return ((nil :: any) :: Z)
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId t = requireType("strange");
auto ftv = get<FunctionTypeVar>(t);
REQUIRE(ftv != nullptr);
std::vector<TypeId> args = flatten(ftv->argTypes).first;
REQUIRE(3 == args.size());
CHECK("{+ x: boolean +}" == toString(args[0]));
CHECK("t1 where t1 = {+ y: t1 +}" == toString(args[1]));
CHECK("{+ z: string +}" == toString(args[2]));
std::vector<TypeId> ret = flatten(ftv->retTypes).first;
REQUIRE(1 == ret.size());
CHECK("{| x: {+ x: boolean, y: t1, z: string +} |} where t1 = {+ y: t1 +}" == toString(ret[0]));
}
TEST_CASE_FIXTURE(Fixture, "intersection_inside_a_table_inside_another_intersection_4")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
// We use a function and inferred parameter types to prevent intermediate normalizations from being performed.
// This exposes a bug where the type of y is mutated.
CheckResult result = check(R"(
function strange(x, y, z)
x.x = true
z.z = "five"
type R = {x: typeof(y)} & {x: typeof(x) & typeof(z)}
local r: R
y.y = r
return r
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId t = requireType("strange");
auto ftv = get<FunctionTypeVar>(t);
REQUIRE(ftv != nullptr);
std::vector<TypeId> args = flatten(ftv->argTypes).first;
REQUIRE(3 == args.size());
CHECK("{+ x: boolean +}" == toString(args[0]));
CHECK("{+ y: t1 +} where t1 = {| x: {+ x: boolean, y: t1, z: string +} |}" == toString(args[1]));
CHECK("{+ z: string +}" == toString(args[2]));
std::vector<TypeId> ret = flatten(ftv->retTypes).first;
REQUIRE(1 == ret.size());
CHECK("t1 where t1 = {| x: {+ x: boolean, y: t1, z: string +} |}" == toString(ret[0]));
}
TEST_CASE_FIXTURE(Fixture, "nested_table_normalization_with_non_table__no_ice")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
{"LuauNormalizeCombineTableFix", true},
};
// CLI-52787
// ends up combining {_:any} with any, recursively
// which used to ICE because this combines a table with a non-table.
CheckResult result = check(R"(
export type t0 = any & { _: {_:any} } & { _:any }
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "visiting_a_type_twice_is_not_considered_normal")
{
ScopedFastFlag sff{"LuauLowerBoundsCalculation", true};
CheckResult result = check(R"(
--!strict
function f(a, b)
local function g()
if math.random() > 0.5 then
return a()
else
return b
end
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("<a>(() -> a, a) -> ()", toString(requireType("f")));
}
TEST_CASE_FIXTURE(Fixture, "fuzz_failure_instersection_combine_must_follow")
{
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
export type t0 = {_:{_:any} & {_:any|string}} & {_:{_:{}}}
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "fuzz_failure_bound_type_is_normal_but_not_its_bounded_to")
{
ScopedFastFlag sff{"LuauLowerBoundsCalculation", true};
CheckResult result = check(R"(
type t252 = ((t0<t252...>)|(any))|(any)
type t0 = t252<t0<any,t24...>,t24...>
)");
LUAU_REQUIRE_ERRORS(result);
}
// We had an issue where a normal BoundTypeVar might point at a non-normal BoundTypeVar if it in turn pointed to a
// normal TypeVar because we were calling follow() in an improper place.
TEST_CASE_FIXTURE(Fixture, "bound_typevars_should_only_be_marked_normal_if_their_pointee_is_normal")
{
ScopedFastFlag sff[]{
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
local T = {}
function T:M()
local function f(a)
print(self.prop)
self:g(a)
self.prop = a
end
end
return T
)");
}
TEST_CASE_FIXTURE(BuiltinsFixture, "skip_force_normal_on_external_types")
{
createSomeClasses(frontend);
@ -1108,68 +472,4 @@ export type t0 = (((any)&({_:l0.t0,n0:t0,_G:any,}))&({_:any,}))&(((any)&({_:l0.t
LUAU_REQUIRE_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "normalize_unions_containing_never")
{
ScopedFastFlag sff{"LuauLowerBoundsCalculation", true};
CheckResult result = check(R"(
type Foo = string | never
local foo: Foo
)");
CHECK_EQ("string", toString(requireType("foo")));
}
TEST_CASE_FIXTURE(Fixture, "normalize_unions_containing_unknown")
{
ScopedFastFlag sff{"LuauLowerBoundsCalculation", true};
CheckResult result = check(R"(
type Foo = string | unknown
local foo: Foo
)");
CHECK_EQ("unknown", toString(requireType("foo")));
}
TEST_CASE_FIXTURE(Fixture, "any_wins_the_battle_over_unknown_in_unions")
{
ScopedFastFlag sff{"LuauLowerBoundsCalculation", true};
CheckResult result = check(R"(
type Foo = unknown | any
local foo: Foo
type Bar = any | unknown
local bar: Bar
)");
CHECK_EQ("any", toString(requireType("foo")));
CHECK_EQ("any", toString(requireType("bar")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "normalization_does_not_convert_ever")
{
ScopedFastFlag sff[]{
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
--!strict
local function f()
if math.random() > 0.5 then
return true
end
type Ret = typeof(f())
if math.random() > 0.5 then
return "something"
end
return "something" :: Ret
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("() -> boolean | string", toString(requireType("f")));
}
TEST_SUITE_END();

@ -15,8 +15,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
struct LimitFixture : BuiltinsFixture
{
#if defined(_NOOPT) || defined(_DEBUG)
@ -267,10 +265,7 @@ TEST_CASE_FIXTURE(LimitFixture, "typescript_port_of_Result_type")
CheckResult result = check(src);
CodeTooComplex ctc;
if (FFlag::LuauLowerBoundsCalculation)
LUAU_REQUIRE_ERRORS(result);
else
CHECK(hasError(result, &ctc));
CHECK(hasError(result, &ctc));
}
TEST_SUITE_END();

@ -7,8 +7,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
using namespace Luau;
struct ToDotClassFixture : Fixture
@ -111,29 +109,7 @@ local function f(a, ...: string) return a end
ToDotOptions opts;
opts.showPointers = false;
if (FFlag::LuauLowerBoundsCalculation)
{
CHECK_EQ(R"(digraph graphname {
n1 [label="FunctionTypeVar 1"];
n1 -> n2 [label="arg"];
n2 [label="TypePack 2"];
n2 -> n3;
n3 [label="GenericTypeVar 3"];
n2 -> n4 [label="tail"];
n4 [label="VariadicTypePack 4"];
n4 -> n5;
n5 [label="string"];
n1 -> n6 [label="ret"];
n6 [label="TypePack 6"];
n6 -> n7;
n7 [label="BoundTypeVar 7"];
n7 -> n3;
})",
toDot(requireType("f"), opts));
}
else
{
CHECK_EQ(R"(digraph graphname {
CHECK_EQ(R"(digraph graphname {
n1 [label="FunctionTypeVar 1"];
n1 -> n2 [label="arg"];
n2 [label="TypePack 2"];
@ -149,8 +125,7 @@ n6 -> n7;
n7 [label="TypePack 7"];
n7 -> n3;
})",
toDot(requireType("f"), opts));
}
toDot(requireType("f"), opts));
}
TEST_CASE_FIXTURE(Fixture, "union")

@ -12,6 +12,7 @@ using namespace Luau;
LUAU_FASTFLAG(LuauRecursiveTypeParameterRestriction);
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
LUAU_FASTFLAG(LuauFixNameMaps);
LUAU_FASTFLAG(LuauFunctionReturnStringificationFixup);
TEST_SUITE_BEGIN("ToString");
@ -570,6 +571,22 @@ TEST_CASE_FIXTURE(Fixture, "toString_the_boundTo_table_type_contained_within_a_T
CHECK_EQ("{| hello: number, world: number |}", toString(&tpv2));
}
TEST_CASE_FIXTURE(Fixture, "no_parentheses_around_return_type_if_pack_has_an_empty_head_link")
{
TypeArena arena;
TypePackId realTail = arena.addTypePack({singletonTypes->stringType});
TypePackId emptyTail = arena.addTypePack({}, realTail);
TypePackId argList = arena.addTypePack({singletonTypes->stringType});
TypeId functionType = arena.addType(FunctionTypeVar{argList, emptyTail});
if (FFlag::LuauFunctionReturnStringificationFixup)
CHECK("(string) -> string" == toString(functionType));
else
CHECK("(string) -> (string)" == toString(functionType));
}
TEST_CASE_FIXTURE(Fixture, "no_parentheses_around_cyclic_function_type_in_union")
{
CheckResult result = check(R"(

@ -657,50 +657,9 @@ struct AssertionCatcher
int AssertionCatcher::tripped;
} // namespace
TEST_CASE_FIXTURE(Fixture, "luau_ice_triggers_an_ice")
{
ScopedFastFlag sffs[] = {
{"DebugLuauMagicTypes", true},
{"LuauUseInternalCompilerErrorException", false},
};
AssertionCatcher ac;
CHECK_THROWS_AS(check(R"(
local a: _luau_ice = 55
)"),
std::runtime_error);
LUAU_ASSERT(1 == AssertionCatcher::tripped);
}
TEST_CASE_FIXTURE(Fixture, "luau_ice_triggers_an_ice_handler")
{
ScopedFastFlag sffs[] = {
{"DebugLuauMagicTypes", true},
{"LuauUseInternalCompilerErrorException", false},
};
bool caught = false;
frontend.iceHandler.onInternalError = [&](const char*) {
caught = true;
};
CHECK_THROWS_AS(check(R"(
local a: _luau_ice = 55
)"),
std::runtime_error);
CHECK_EQ(true, caught);
}
TEST_CASE_FIXTURE(Fixture, "luau_ice_triggers_an_ice_exception_with_flag")
{
ScopedFastFlag sffs[] = {
{"DebugLuauMagicTypes", true},
{"LuauUseInternalCompilerErrorException", true},
};
ScopedFastFlag sffs{"DebugLuauMagicTypes", true};
AssertionCatcher ac;
@ -714,10 +673,7 @@ TEST_CASE_FIXTURE(Fixture, "luau_ice_triggers_an_ice_exception_with_flag")
TEST_CASE_FIXTURE(Fixture, "luau_ice_triggers_an_ice_exception_with_flag_handler")
{
ScopedFastFlag sffs[] = {
{"DebugLuauMagicTypes", true},
{"LuauUseInternalCompilerErrorException", true},
};
ScopedFastFlag sffs{"DebugLuauMagicTypes", true};
bool caught = false;

@ -8,9 +8,7 @@
using namespace Luau;
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
LUAU_FASTFLAG(LuauStringFormatArgumentErrorFix)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
TEST_SUITE_BEGIN("BuiltinTests");
@ -637,8 +635,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "select_with_decimal_argument_is_rounded_down
// Could be flaky if the fix has regressed.
TEST_CASE_FIXTURE(BuiltinsFixture, "bad_select_should_not_crash")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
CheckResult result = check(R"(
do end
local _ = function(l0,...)
@ -754,14 +750,7 @@ TEST_CASE_FIXTURE(Fixture, "string_format_use_correct_argument")
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauStringFormatArgumentErrorFix)
{
CHECK_EQ("Argument count mismatch. Function expects 2 arguments, but 3 are specified", toString(result.errors[0]));
}
else
{
CHECK_EQ("Argument count mismatch. Function expects 1 argument, but 2 are specified", toString(result.errors[0]));
}
CHECK_EQ("Argument count mismatch. Function expects 2 arguments, but 3 are specified", toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "string_format_use_correct_argument2")
@ -778,8 +767,6 @@ TEST_CASE_FIXTURE(Fixture, "string_format_use_correct_argument2")
TEST_CASE_FIXTURE(BuiltinsFixture, "string_format_use_correct_argument3")
{
ScopedFastFlag LuauStringFormatArgumentErrorFix{"LuauStringFormatArgumentErrorFix", true};
CheckResult result = check(R"(
local s1 = string.format("%d")
local s2 = string.format("%d", 1)
@ -966,10 +953,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "assert_removes_falsy_types")
)");
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ("((boolean | number)?) -> number | true", toString(requireType("f")));
else
CHECK_EQ("((boolean | number)?) -> boolean | number", toString(requireType("f")));
CHECK_EQ("((boolean | number)?) -> boolean | number", toString(requireType("f")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "assert_removes_falsy_types2")
@ -1040,8 +1024,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "table_freeze_is_generic")
TEST_CASE_FIXTURE(BuiltinsFixture, "set_metatable_needs_arguments")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
ScopedFastFlag sff{"LuauSetMetaTableArgsCheck", true};
CheckResult result = check(R"(
local a = {b=setmetatable}

@ -482,7 +482,7 @@ local a: ChildClass = i
TEST_CASE_FIXTURE(ClassFixture, "intersections_of_unions_of_classes")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -499,7 +499,7 @@ TEST_CASE_FIXTURE(ClassFixture, "intersections_of_unions_of_classes")
TEST_CASE_FIXTURE(ClassFixture, "unions_of_intersections_of_classes")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};

@ -14,7 +14,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(LuauInstantiateInSubtyping);
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
@ -299,22 +298,6 @@ TEST_CASE_FIXTURE(Fixture, "cyclic_function_type_in_rets")
CHECK_EQ("t1 where t1 = () -> t1", toString(requireType("f")));
}
TEST_CASE_FIXTURE(Fixture, "cyclic_function_type_in_args")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
function f(g)
return f(f)
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("t1 where t1 = <a...>(t1) -> (a...)", toString(requireType("f")));
}
TEST_CASE_FIXTURE(Fixture, "another_higher_order_function")
{
CheckResult result = check(R"(
@ -1132,16 +1115,13 @@ f(function(x) return x * 2 end)
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Type 'number' could not be converted into 'Table'", toString(result.errors[0]));
if (!FFlag::LuauLowerBoundsCalculation)
{
// Return type doesn't inference 'nil'
result = check(R"(
function f(a: (number) -> nil) return a(4) end
f(function(x) print(x) end)
)");
// Return type doesn't inference 'nil'
result = check(R"(
function f(a: (number) -> nil) return a(4) end
f(function(x) print(x) end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "infer_anonymous_function_arguments")
@ -1244,16 +1224,13 @@ f(function(x) return x * 2 end)
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Type 'number' could not be converted into 'Table'", toString(result.errors[0]));
if (!FFlag::LuauLowerBoundsCalculation)
{
// Return type doesn't inference 'nil'
result = check(R"(
function f(a: (number) -> nil) return a(4) end
f(function(x) print(x) end)
)");
// Return type doesn't inference 'nil'
result = check(R"(
function f(a: (number) -> nil) return a(4) end
f(function(x) print(x) end)
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "infer_anonymous_function_arguments_outside_call")
@ -1436,87 +1413,6 @@ end
CHECK_EQ(toString(result.errors[1]), R"(Type 'string' could not be converted into 'number')");
}
TEST_CASE_FIXTURE(Fixture, "inconsistent_return_types")
{
const ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
function foo(a: boolean, b: number)
if a then
return nil
else
return b
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("(boolean, number) -> number?", toString(requireType("foo")));
// TODO: Test multiple returns
// Think of various cases where typepacks need to grow. maybe consult other tests
// Basic normalization of ConstrainedTypeVars during quantification
}
TEST_CASE_FIXTURE(Fixture, "inconsistent_higher_order_function")
{
const ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
function foo(f)
f(5)
f("six")
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("<a...>((number | string) -> (a...)) -> ()", toString(requireType("foo")));
}
/* The bug here is that we are using the same level 2.0 for both the body of resolveDispatcher and the
* lambda useCallback.
*
* I think what we want to do is, at each scope level, never reuse the same sublevel.
*
* We also adjust checkBlock to consider the syntax `local x = function() ... end` to be sortable
* in the same way as `local function x() ... end`. This causes the function `resolveDispatcher` to be
* checked before the lambda.
*/
TEST_CASE_FIXTURE(Fixture, "inferred_higher_order_functions_are_quantified_at_the_right_time")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
--!strict
local function resolveDispatcher()
return (nil :: any) :: {useCallback: (any) -> any}
end
local useCallback = function(deps: any)
return resolveDispatcher().useCallback(deps)
end
)");
// LUAU_REQUIRE_NO_ERRORS is particularly unhelpful when this test is broken.
// You get a TypeMismatch error where both types stringify the same.
CHECK(result.errors.empty());
if (!result.errors.empty())
{
for (const auto& e : result.errors)
printf("%s: %s\n", toString(e.location).c_str(), toString(e).c_str());
}
}
TEST_CASE_FIXTURE(Fixture, "inferred_higher_order_functions_are_quantified_at_the_right_time2")
{
CheckResult result = check(R"(
@ -1700,56 +1596,6 @@ TEST_CASE_FIXTURE(Fixture, "occurs_check_failure_in_function_return_type")
CHECK(nullptr != get<OccursCheckFailed>(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "quantify_constrained_types")
{
ScopedFastFlag sff[]{
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
--!strict
local function foo(f)
f(5)
f("hi")
local function g()
return f
end
local h = g()
h(true)
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("<a...>((boolean | number | string) -> (a...)) -> ()", toString(requireType("foo")));
}
TEST_CASE_FIXTURE(Fixture, "call_o_with_another_argument_after_foo_was_quantified")
{
ScopedFastFlag sff[]{
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
local function f(o)
local t = {}
t[o] = true
local function foo(o)
o.m1(5)
t[o] = nil
end
o.m1("hi")
return t
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
// TODO: check the normalized type of f
}
TEST_CASE_FIXTURE(Fixture, "free_is_not_bound_to_unknown")
{
CheckResult result = check(R"(
@ -1800,8 +1646,6 @@ TEST_CASE_FIXTURE(Fixture, "dont_mutate_the_underlying_head_of_typepack_when_cal
TEST_CASE_FIXTURE(BuiltinsFixture, "improved_function_arg_mismatch_errors")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
CheckResult result = check(R"(
local function foo1(a: number) end
foo1()
@ -1838,8 +1682,6 @@ u.a.foo()
// This might be surprising, but since 'any' became optional, unannotated functions in non-strict 'expect' 0 arguments
TEST_CASE_FIXTURE(BuiltinsFixture, "improved_function_arg_mismatch_error_nonstrict")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
CheckResult result = check(R"(
--!nonstrict
local function foo(a, b) end

View File

@ -9,7 +9,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauCheckGenericHOFTypes)
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
@ -783,8 +782,6 @@ local TheDispatcher: Dispatcher = {
TEST_CASE_FIXTURE(Fixture, "generic_argument_count_too_few")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
CheckResult result = check(R"(
function test(a: number)
return 1
@ -802,8 +799,6 @@ wrapper(test)
TEST_CASE_FIXTURE(Fixture, "generic_argument_count_too_many")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
CheckResult result = check(R"(
function test2(a: number, b: string)
return 1
@ -965,7 +960,6 @@ TEST_CASE_FIXTURE(Fixture, "instantiate_generic_function_in_assignments")
CHECK_EQ("<a, b...>((a) -> (b...), a) -> (b...)", toString(tm->givenType));
else
CHECK_EQ("((number) -> number, number) -> number", toString(tm->givenType));
}
TEST_CASE_FIXTURE(Fixture, "instantiate_generic_function_in_assignments2")
@ -1114,27 +1108,7 @@ local b = sumrec(sum) -- ok
local c = sumrec(function(x, y, f) return f(x, y) end) -- type binders are not inferred
)");
if (FFlag::LuauCheckGenericHOFTypes)
{
LUAU_REQUIRE_NO_ERRORS(result);
}
else if (FFlag::LuauInstantiateInSubtyping)
{
LUAU_REQUIRE_ERRORS(result);
CHECK_EQ(
R"(Type '<a, b, c...>(a, b, (a, b) -> (c...)) -> (c...)' could not be converted into '<a>(a, a, (a, a) -> a) -> a'
caused by:
Argument #1 type is not compatible. Generic subtype escaping scope)",
toString(result.errors[0]));
}
else
{
LUAU_REQUIRE_ERRORS(result);
CHECK_EQ(
"Type '(a, b, (a, b) -> (c...)) -> (c...)' could not be converted into '<a>(a, a, (a, a) -> a) -> a'; different number of generic type "
"parameters",
toString(result.errors[0]));
}
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "substitution_with_bound_table")
@ -1258,7 +1232,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "higher_rank_polymorphism_should_not_accept_i
{
ScopedFastFlag sffs[] = {
{"LuauInstantiateInSubtyping", true},
{"LuauCheckGenericHOFTypes", true}, // necessary because of interactions with the test
};
CheckResult result = check(R"(

@ -8,7 +8,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
TEST_SUITE_BEGIN("IntersectionTypes");
@ -306,10 +305,7 @@ TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ(toString(result.errors[0]), "Cannot add property 'z' to table '{| x: number, y: number |}'");
else
CHECK_EQ(toString(result.errors[0]), "Cannot add property 'z' to table 'X & Y'");
CHECK_EQ(toString(result.errors[0]), "Cannot add property 'z' to table 'X & Y'");
}
TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed_indirect")
@ -333,16 +329,9 @@ TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed_indirect")
CHECK_EQ(toString(result.errors[0]), R"(Type '(string, number) -> string' could not be converted into '(string) -> string'
caused by:
Argument count mismatch. Function expects 2 arguments, but only 1 is specified)");
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ(toString(result.errors[1]), "Cannot add property 'z' to table '{| x: (number) -> number, y: (string) -> string |}'");
else
CHECK_EQ(toString(result.errors[1]), "Cannot add property 'z' to table 'X & Y'");
CHECK_EQ(toString(result.errors[1]), "Cannot add property 'z' to table 'X & Y'");
CHECK_EQ(toString(result.errors[2]), "Type 'number' could not be converted into 'string'");
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ(toString(result.errors[3]), "Cannot add property 'w' to table '{| x: (number) -> number, y: (string) -> string |}'");
else
CHECK_EQ(toString(result.errors[3]), "Cannot add property 'w' to table 'X & Y'");
CHECK_EQ(toString(result.errors[3]), "Cannot add property 'w' to table 'X & Y'");
}
TEST_CASE_FIXTURE(Fixture, "table_write_sealed_indirect")
@ -381,15 +370,11 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "table_intersection_setmetatable")
TEST_CASE_FIXTURE(Fixture, "error_detailed_intersection_part")
{
ScopedFastFlag flags[] = {{"LuauLowerBoundsCalculation", false}};
CheckResult result = check(R"(
type X = { x: number }
type Y = { y: number }
type Z = { z: number }
type XYZ = X & Y & Z
local a: XYZ = 3
)");
@ -401,15 +386,11 @@ caused by:
TEST_CASE_FIXTURE(Fixture, "error_detailed_intersection_all")
{
ScopedFastFlag flags[] = {{"LuauLowerBoundsCalculation", false}};
CheckResult result = check(R"(
type X = { x: number }
type Y = { y: number }
type Z = { z: number }
type XYZ = X & Y & Z
local a: XYZ
local b: number = a
)");
@ -468,12 +449,13 @@ TEST_CASE_FIXTURE(Fixture, "intersect_false_and_bool_and_false")
LUAU_REQUIRE_ERROR_COUNT(1, result);
// TODO: odd stringification of `false & (boolean & false)`.)
CHECK_EQ(toString(result.errors[0]), "Type 'boolean & false & false' could not be converted into 'true'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type 'boolean & false & false' could not be converted into 'true'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "intersect_saturate_overloaded_functions")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -485,12 +467,13 @@ TEST_CASE_FIXTURE(Fixture, "intersect_saturate_overloaded_functions")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((number?) -> number?) & ((string?) -> string?)' could not be converted into '(number) -> number'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((number?) -> number?) & ((string?) -> string?)' could not be converted into '(number) -> number'; "
"none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "union_saturate_overloaded_functions")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -502,12 +485,13 @@ TEST_CASE_FIXTURE(Fixture, "union_saturate_overloaded_functions")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((number) -> number) & ((string) -> string)' could not be converted into '(boolean | number) -> boolean | number'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((number) -> number) & ((string) -> string)' could not be converted into '(boolean | number) -> "
"boolean | number'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "intersection_of_tables")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -519,7 +503,8 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '{| p: number?, q: number?, r: number? |} & {| p: number?, q: string? |}' could not be converted into '{| p: nil |}'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '{| p: number?, q: number?, r: number? |} & {| p: number?, q: string? |}' could not be converted into "
"'{| p: nil |}'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
@ -531,12 +516,13 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '{| p: number?, q: any |} & {| p: unknown, q: string? |}' could not be converted into '{| p: string?, q: number? |}'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '{| p: number?, q: any |} & {| p: unknown, q: string? |}' could not be converted into '{| p: string?, "
"q: number? |}'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_never_properties")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -549,12 +535,13 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_never_properties")
// TODO: this should not produce type errors, since never <: { p : never }
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '{| p: never, q: string? |} & {| p: number?, q: never |}' could not be converted into 'never'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '{| p: never, q: string? |} & {| p: number?, q: never |}' could not be converted into 'never'; none "
"of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloaded_functions_returning_intersections")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -566,12 +553,14 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_returning_intersections")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((number?) -> {| p: number |} & {| q: number |}) & ((string?) -> {| p: number |} & {| r: number |})' could not be converted into '(number?) -> {| p: number, q: number, r: number |}'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '((number?) -> {| p: number |} & {| q: number |}) & ((string?) -> {| p: number |} & {| r: number |})' could not be converted into "
"'(number?) -> {| p: number, q: number, r: number |}'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generic")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -585,12 +574,13 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generic")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((number?) -> a | number) & ((string?) -> a | string)' could not be converted into '(number?) -> a'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((number?) -> a | number) & ((string?) -> a | string)' could not be converted into '(number?) -> a'; "
"none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generics")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -604,12 +594,13 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generics")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((a?) -> a | b) & ((c?) -> b | c)' could not be converted into '(a?) -> (a & c) | b'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '((a?) -> a | b) & ((c?) -> b | c)' could not be converted into '(a?) -> (a & c) | b'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generic_packs")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -623,12 +614,13 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generic_packs")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((number?, a...) -> (number?, b...)) & ((string?, a...) -> (string?, b...))' could not be converted into '(nil, b...) -> (nil, a...)'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((number?, a...) -> (number?, b...)) & ((string?, a...) -> (string?, b...))' could not be converted "
"into '(nil, b...) -> (nil, a...)'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_unknown_result")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -642,12 +634,13 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_unknown_result")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((nil) -> unknown) & ((number) -> number)' could not be converted into '(number?) -> number?'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((nil) -> unknown) & ((number) -> number)' could not be converted into '(number?) -> number?'; none "
"of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_unknown_arguments")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -661,12 +654,13 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_unknown_arguments")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((number) -> number?) & ((unknown) -> string?)' could not be converted into '(number?) -> nil'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((number) -> number?) & ((unknown) -> string?)' could not be converted into '(number?) -> nil'; none "
"of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_never_result")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -680,12 +674,13 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_never_result")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((nil) -> never) & ((number) -> number)' could not be converted into '(number?) -> never'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((nil) -> never) & ((number) -> number)' could not be converted into '(number?) -> never'; none of "
"the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_never_arguments")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -699,7 +694,8 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_never_arguments")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((never) -> string?) & ((number) -> number?)' could not be converted into '(number?) -> nil'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((never) -> string?) & ((number) -> number?)' could not be converted into '(number?) -> nil'; none "
"of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_overlapping_results_and_variadics")
@ -711,7 +707,8 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_overlapping_results_and_
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((number?) -> (...number)) & ((string?) -> number | string)' could not be converted into '(number | string) -> (number, number?)'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((number?) -> (...number)) & ((string?) -> number | string)' could not be converted into '(number | "
"string) -> (number, number?)'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_1")
@ -725,7 +722,8 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_1")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '(() -> (a...)) & (() -> (b...))' could not be converted into '() -> ()'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '(() -> (a...)) & (() -> (b...))' could not be converted into '() -> ()'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_2")
@ -739,7 +737,8 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_2")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((a...) -> ()) & ((b...) -> ())' could not be converted into '() -> ()'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '((a...) -> ()) & ((b...) -> ())' could not be converted into '() -> ()'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_3")
@ -753,7 +752,8 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_3")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '(() -> (a...)) & (() -> (number?, a...))' could not be converted into '() -> number'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '(() -> (a...)) & (() -> (number?, a...))' could not be converted into '() -> number'; none of the intersection parts are compatible");
}
TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_4")
@ -767,12 +767,13 @@ TEST_CASE_FIXTURE(Fixture, "overloadeded_functions_with_weird_typepacks_4")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '((a...) -> ()) & ((number, a...) -> number)' could not be converted into '(number?) -> ()'; none of the intersection parts are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '((a...) -> ()) & ((number, a...) -> number)' could not be converted into '(number?) -> ()'; none of "
"the intersection parts are compatible");
}
TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatables")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -800,7 +801,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatables")
TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatable_subtypes")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -826,7 +827,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatable_subtypes")
TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatables_with_properties")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -849,7 +850,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatables_with_properties")
TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatable_with table")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -874,7 +875,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "intersect_metatable_with table")
TEST_CASE_FIXTURE(Fixture, "CLI-44817")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};

@ -622,9 +622,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "loop_iter_metamethod_not_enough_returns")
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(result.errors[0] == TypeError{
Location{{2, 36}, {2, 37}},
GenericError{"__iter must return at least one value"},
});
Location{{2, 36}, {2, 37}},
GenericError{"__iter must return at least one value"},
});
}
TEST_CASE_FIXTURE(BuiltinsFixture, "loop_iter_metamethod_ok")

@ -254,20 +254,20 @@ return m
if (FFlag::LuauInstantiateInSubtyping)
{
// though this didn't error before the flag, it seems as though it should error since fields of a table are invariant.
// the user's intent would likely be that these "method" fields would be read-only, but without an annotation, accepting this should be unsound.
// the user's intent would likely be that these "method" fields would be read-only, but without an annotation, accepting this should be
// unsound.
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(R"(Type 'n' could not be converted into 't1 where t1 = {- Clone: (t1) -> (a...) -}'
caused by:
Property 'Clone' is not compatible. Type '<a>(a) -> ()' could not be converted into 't1 where t1 = ({- Clone: t1 -}) -> (a...)'; different number of generic type parameters)",
toString(result.errors[0]));
toString(result.errors[0]));
}
else
{
LUAU_REQUIRE_NO_ERRORS(result);
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "custom_require_global")

@ -7,8 +7,6 @@
#include <algorithm>
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
using namespace Luau;
TEST_SUITE_BEGIN("ProvisionalTests");
@ -301,19 +299,10 @@ TEST_CASE_FIXTURE(Fixture, "do_not_ice_when_trying_to_pick_first_of_generic_type
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::LuauLowerBoundsCalculation)
{
CHECK_EQ("() -> ()", toString(requireType("f")));
CHECK_EQ("() -> ()", toString(requireType("g")));
CHECK_EQ("nil", toString(requireType("x")));
}
else
{
// f and g should have the type () -> ()
CHECK_EQ("() -> (a...)", toString(requireType("f")));
CHECK_EQ("<a...>() -> (a...)", toString(requireType("g")));
CHECK_EQ("any", toString(requireType("x"))); // any is returned instead of ICE for now
}
// f and g should have the type () -> ()
CHECK_EQ("() -> (a...)", toString(requireType("f")));
CHECK_EQ("<a...>() -> (a...)", toString(requireType("g")));
CHECK_EQ("any", toString(requireType("x"))); // any is returned instead of ICE for now
}
TEST_CASE_FIXTURE(Fixture, "specialization_binds_with_prototypes_too_early")
@ -330,7 +319,6 @@ TEST_CASE_FIXTURE(Fixture, "specialization_binds_with_prototypes_too_early")
TEST_CASE_FIXTURE(Fixture, "weird_fail_to_unify_type_pack")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", false},
// I'm not sure why this is broken without DCR, but it seems to be fixed
// when DCR is enabled.
{"DebugLuauDeferredConstraintResolution", false},
@ -347,7 +335,6 @@ TEST_CASE_FIXTURE(Fixture, "weird_fail_to_unify_type_pack")
TEST_CASE_FIXTURE(Fixture, "weird_fail_to_unify_variadic_pack")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", false},
// I'm not sure why this is broken without DCR, but it seems to be fixed
// when DCR is enabled.
{"DebugLuauDeferredConstraintResolution", false},
@ -362,56 +349,6 @@ TEST_CASE_FIXTURE(Fixture, "weird_fail_to_unify_variadic_pack")
LUAU_REQUIRE_ERRORS(result); // Should not have any errors.
}
TEST_CASE_FIXTURE(Fixture, "lower_bounds_calculation_is_too_permissive_with_overloaded_higher_order_functions")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
function foo(f)
f(5, 'a')
f('b', 6)
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
// We incorrectly infer that the argument to foo could be called with (number, number) or (string, string)
// even though that is strictly more permissive than the actual source text shows.
CHECK("<a...>((number | string, number | string) -> (a...)) -> ()" == toString(requireType("foo")));
}
// Once fixed, move this to Normalize.test.cpp
TEST_CASE_FIXTURE(Fixture, "normalization_fails_on_certain_kinds_of_cyclic_tables")
{
#if defined(_DEBUG) || defined(_NOOPT)
ScopedFastInt sfi("LuauNormalizeIterationLimit", 500);
#endif
ScopedFastFlag flags[] = {
{"LuauLowerBoundsCalculation", true},
};
// We use a function and inferred parameter types to prevent intermediate normalizations from being performed.
// This exposes a bug where the type of y is mutated.
CheckResult result = check(R"(
function strange(x, y)
x.x = y
y.x = x
type R = {x: typeof(x)} & {x: typeof(y)}
local r: R
return r
end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(nullptr != get<NormalizationTooComplex>(result.errors[0]));
}
// Belongs in TypeInfer.builtins.test.cpp.
TEST_CASE_FIXTURE(BuiltinsFixture, "pcall_returns_at_least_two_value_but_function_returns_nothing")
{
@ -473,36 +410,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "function_returns_many_things_but_first_of_it
CHECK_EQ("boolean", toString(requireType("b")));
}
TEST_CASE_FIXTURE(Fixture, "constrained_is_level_dependent")
{
ScopedFastFlag sff[]{
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
local function f(o)
local t = {}
t[o] = true
local function foo(o)
o:m1()
t[o] = nil
end
local function bar(o)
o:m2()
t[o] = true
end
return t
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
// TODO: We're missing generics b...
CHECK_EQ("<a...>(t1) -> {| [t1]: boolean |} where t1 = t2 ; t2 = {+ m1: (t1) -> (a...), m2: (t2) -> (b...) +}", toString(requireType("f")));
}
TEST_CASE_FIXTURE(Fixture, "free_is_not_bound_to_any")
{
CheckResult result = check(R"(
@ -695,4 +602,187 @@ return wrapStrictTable(Constants, "Constants")
CHECK(get<AnyTypeVar>(*result));
}
// We need a simplification step to make this do the right thing. ("normalization-lite")
TEST_CASE_FIXTURE(BuiltinsFixture, "table_insert_with_a_singleton_argument")
{
CheckResult result = check(R"(
local function foo(t, x)
if x == "hi" or x == "bye" then
table.insert(t, x)
end
return t
end
local t = foo({}, "hi")
table.insert(t, "totally_unrelated_type" :: "totally_unrelated_type")
)");
LUAU_REQUIRE_NO_ERRORS(result);
// We'd really like for this to be {string}
CHECK_EQ("{string | string}", toString(requireType("t")));
}
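As an aside, a minimal Luau sketch of the simplification the comment above is asking for (not part of the diff; the push helper and its annotations are invented for illustration): both singleton branches widen to string on insert, so the element type ought to collapse from 'string | string' to plain 'string'.

--!strict
-- hypothetical helper mirroring the test above: x is one of two string singletons
local function push(t: {string}, x: "hi" | "bye")
    -- either singleton is a subtype of string, so inserting it should keep the
    -- element type as plain string instead of accumulating a union
    table.insert(t, x)
end

local t: {string} = {}
push(t, "hi")
push(t, "bye")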
struct NormalizeFixture : Fixture
{
bool isSubtype(TypeId a, TypeId b)
{
return ::Luau::isSubtype(a, b, NotNull{getMainModule()->getModuleScope().get()}, singletonTypes, ice);
}
};
TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_functions_of_different_arities")
{
check(R"(
type A = (any) -> ()
type B = (any, any) -> ()
type T = A & B
local a: A
local b: B
local t: T
)");
[[maybe_unused]] TypeId a = requireType("a");
[[maybe_unused]] TypeId b = requireType("b");
// CHECK(!isSubtype(a, b)); // !!
// CHECK(!isSubtype(b, a));
CHECK("((any) -> ()) & ((any, any) -> ())" == toString(requireType("t")));
}
TEST_CASE_FIXTURE(NormalizeFixture, "functions_with_mismatching_arity")
{
check(R"(
local a: (number) -> ()
local b: () -> ()
local c: () -> number
)");
TypeId a = requireType("a");
TypeId b = requireType("b");
TypeId c = requireType("c");
// CHECK(!isSubtype(b, a));
// CHECK(!isSubtype(c, a));
CHECK(!isSubtype(a, b));
// CHECK(!isSubtype(c, b));
CHECK(!isSubtype(a, c));
CHECK(!isSubtype(b, c));
}
TEST_CASE_FIXTURE(NormalizeFixture, "functions_with_mismatching_arity_but_optional_parameters")
{
/*
* (T0..TN) <: (T0..TN, A?)
* (T0..TN) <: (T0..TN, any)
* (T0..TN, A?) </: (T0..TN) We don't technically need to spell this out, but it's quite important.
* T <: T
* if A <: B and B <: C then A <: C
* T -> R <: U -> S if U <: T and R <: S
* A | B <: T if A <: T and B <: T
* T <: A | B if T <: A or T <: B
*/
check(R"(
local a: (number?) -> ()
local b: (number) -> ()
local c: (number, number?) -> ()
)");
TypeId a = requireType("a");
TypeId b = requireType("b");
TypeId c = requireType("c");
/*
* (number) -> () </: (number?) -> ()
* because number? </: number (because number <: number, but nil </: number)
*/
CHECK(!isSubtype(b, a));
/*
* (number, number?) </: (number?) -> ()
* because number? </: number (as above)
*/
CHECK(!isSubtype(c, a));
/*
* (number?) -> () <: (number) -> ()
* because number <: number? (because number <: number)
*/
CHECK(isSubtype(a, b));
/*
* (number, number?) -> () <: (number) -> (number)
* The packs have unequal lengths, but (number) <: (number, number?)
* and number <: number
*/
// CHECK(!isSubtype(c, b));
/*
* (number?) -> () </: (number, number?) -> ()
* because (number, number?) </: (number)
*/
// CHECK(!isSubtype(a, c));
/*
* (number) -> () </: (number, number?) -> ()
* because (number, number?) </: (number)
*/
// CHECK(!isSubtype(b, c));
}
TEST_CASE_FIXTURE(NormalizeFixture, "functions_with_mismatching_arity_but_any_is_an_optional_param")
{
check(R"(
local a: (number?) -> ()
local b: (number) -> ()
local c: (number, any) -> ()
)");
TypeId a = requireType("a");
TypeId b = requireType("b");
TypeId c = requireType("c");
/*
* (number) -> () </: (number?) -> ()
* because number? </: number (because number <: number, but nil </: number)
*/
CHECK(!isSubtype(b, a));
/*
* (number, any) </: (number?) -> ()
* because number? </: number (as above)
*/
CHECK(!isSubtype(c, a));
/*
* (number?) -> () <: (number) -> ()
* because number <: number? (because number <: number)
*/
CHECK(isSubtype(a, b));
/*
* (number, any) -> () </: (number) -> (number)
* The packs have unequal lengths
*/
// CHECK(!isSubtype(c, b));
/*
* (number?) -> () </: (number, any) -> ()
* The packs have unequal lengths
*/
// CHECK(!isSubtype(a, c));
/*
* (number) -> () </: (number, any) -> ()
* The packs have unequal lengths
*/
// CHECK(!isSubtype(b, c));
}
TEST_SUITE_END();
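For readers following the commented-out subtyping rules in the arity tests above, here is a minimal Luau sketch of the one direction those tests actively assert (not part of the diff; strict mode and the function names are assumptions made for illustration): a function whose parameter is optional may stand in for one that requires the argument, but not the reverse.

--!strict
local function acceptsOptional(x: number?) end
local function needsNumber(x: number) end

-- (number?) -> () <: (number) -> () because number <: number? (parameters are contravariant)
local f: (number) -> () = acceptsOptional
f(1)
needsNumber(2)

-- the reverse assignment is rejected: a caller holding a (number?) -> () may pass nil
-- local g: (number?) -> () = needsNumber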

View File

@ -7,7 +7,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
using namespace Luau;
@ -608,10 +607,7 @@ TEST_CASE_FIXTURE(Fixture, "type_guard_can_filter_for_intersection_of_tables")
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ("{| x: number, y: number |}", toString(requireTypeAtPosition({4, 28})));
else
CHECK_EQ("{| x: number |} & {| y: number |}", toString(requireTypeAtPosition({4, 28})));
CHECK_EQ("{| x: number |} & {| y: number |}", toString(requireTypeAtPosition({4, 28})));
CHECK_EQ("nil", toString(requireTypeAtPosition({6, 28})));
}

View File

@ -421,28 +421,6 @@ TEST_CASE_FIXTURE(Fixture, "widening_happens_almost_everywhere_except_for_tables
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "table_insert_with_a_singleton_argument")
{
ScopedFastFlag sff{"LuauLowerBoundsCalculation", true};
CheckResult result = check(R"(
local function foo(t, x)
if x == "hi" or x == "bye" then
table.insert(t, x)
end
return t
end
local t = foo({}, "hi")
table.insert(t, "totally_unrelated_type" :: "totally_unrelated_type")
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ("{string}", toString(requireType("t")));
}
TEST_CASE_FIXTURE(Fixture, "functions_are_not_to_be_widened")
{
CheckResult result = check(R"(

View File

@ -11,7 +11,6 @@
using namespace Luau;
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
TEST_SUITE_BEGIN("TableTests");
@ -1196,10 +1195,7 @@ TEST_CASE_FIXTURE(Fixture, "pass_incompatible_union_to_a_generic_table_without_c
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauLowerBoundsCalculation)
CHECK(get<MissingProperties>(result.errors[0]));
else
CHECK(get<TypeMismatch>(result.errors[0]));
CHECK(get<TypeMismatch>(result.errors[0]));
}
// This unit test could be flaky if the fix has regressed.
@ -2627,8 +2623,6 @@ do end
TEST_CASE_FIXTURE(BuiltinsFixture, "dont_crash_when_setmetatable_does_not_produce_a_metatabletypevar")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
CheckResult result = check("local x = setmetatable({})");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Argument count mismatch. Function 'setmetatable' expects 2 arguments, but only 1 is specified", toString(result.errors[0]));
@ -2709,8 +2703,6 @@ local baz = foo[bar]
TEST_CASE_FIXTURE(BuiltinsFixture, "table_simple_call")
{
ScopedFastFlag luauFunctionArgMismatchDetails{"LuauFunctionArgMismatchDetails", true};
CheckResult result = check(R"(
local a = setmetatable({ x = 2 }, {
__call = function(self)
@ -2887,7 +2879,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dont_leak_free_table_props")
TEST_CASE_FIXTURE(Fixture, "inferred_return_type_of_free_table")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
// {"LuauLowerBoundsCalculation", true},
{"DebugLuauSharedSelf", true},
};

View File

@ -14,12 +14,10 @@
#include <algorithm>
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(LuauFixLocationSpanTableIndexExpr);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauInstantiateInSubtyping);
LUAU_FASTFLAG(LuauSpecialTypesAsterisked);
LUAU_FASTFLAG(LuauCheckGenericHOFTypes);
using namespace Luau;
@ -89,7 +87,6 @@ TEST_CASE_FIXTURE(Fixture, "infer_in_nocheck_mode")
{
ScopedFastFlag sff[]{
{"DebugLuauDeferredConstraintResolution", false},
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
@ -1001,21 +998,23 @@ TEST_CASE_FIXTURE(Fixture, "cli_50041_committing_txnlog_in_apollo_client_error")
end
)");
if (FFlag::LuauInstantiateInSubtyping && !FFlag::LuauCheckGenericHOFTypes)
if (FFlag::LuauInstantiateInSubtyping)
{
// though this didn't error before the flag, it seems as though it should error since fields of a table are invariant.
// the user's intent would likely be that these "method" fields would be read-only, but without an annotation, accepting this should be unsound.
// the user's intent would likely be that these "method" fields would be read-only, but without an annotation, accepting this should be
// unsound.
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(R"(Type 't1 where t1 = {+ getStoreFieldName: (t1, {| fieldName: string |} & {| from: number? |}) -> (a, b...) +}' could not be converted into 'Policies'
CHECK_EQ(
R"(Type 't1 where t1 = {+ getStoreFieldName: (t1, {| fieldName: string |} & {| from: number? |}) -> (a, b...) +}' could not be converted into 'Policies'
caused by:
Property 'getStoreFieldName' is not compatible. Type 't1 where t1 = ({+ getStoreFieldName: t1 +}, {| fieldName: string |} & {| from: number? |}) -> (a, b...)' could not be converted into '(Policies, FieldSpecifier) -> string'
caused by:
Argument #2 type is not compatible. Type 'FieldSpecifier' could not be converted into 'FieldSpecifier & {| from: number? |}'
caused by:
Not all intersection parts are compatible. Table type 'FieldSpecifier' not compatible with type '{| from: number? |}' because the former has extra field 'fieldName')",
toString(result.errors[0]));
toString(result.errors[0]));
}
else
{
@ -1044,7 +1043,7 @@ TEST_CASE_FIXTURE(Fixture, "type_infer_recursion_limit_no_ice")
TEST_CASE_FIXTURE(Fixture, "type_infer_recursion_limit_normalizer")
{
ScopedFastInt sfi("LuauTypeInferRecursionLimit", 10);
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
{"LuauAutocompleteDynamicLimits", true},
@ -1057,14 +1056,14 @@ TEST_CASE_FIXTURE(Fixture, "type_infer_recursion_limit_normalizer")
end
)");
LUAU_REQUIRE_ERRORS(result);
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("Internal error: Code is too complex to typecheck! Consider adding type annotations around this area", toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "type_infer_cache_limit_normalizer")
{
ScopedFastInt sfi("LuauNormalizeCacheLimit", 10);
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -1101,45 +1100,6 @@ TEST_CASE_FIXTURE(Fixture, "follow_on_new_types_in_substitution")
LUAU_REQUIRE_NO_ERRORS(result);
}
/**
* The problem we had here was that the type of q in B.h was initially inferring to {} | {prop: free} before we bound
* that second table to the enclosing union.
*/
TEST_CASE_FIXTURE(Fixture, "do_not_bind_a_free_table_to_a_union_containing_that_table")
{
ScopedFastFlag flag[] = {
{"LuauLowerBoundsCalculation", true},
};
CheckResult result = check(R"(
--!strict
local A = {}
function A:f()
local t = {}
for key, value in pairs(self) do
t[key] = value
end
return t
end
local B = A:f()
function B.g(t)
assert(type(t) == "table")
assert(t.prop ~= nil)
end
function B.h(q)
q = q or {}
return q or {}
end
)");
}
TEST_CASE_FIXTURE(Fixture, "types_stored_in_astResolvedTypes")
{
CheckResult result = check(R"(

View File

@ -302,8 +302,8 @@ TEST_CASE_FIXTURE(TryUnifyFixture, "metatables_unify_against_shape_of_free_table
REQUIRE_EQ(state.errors.size(), 1);
std::string expected = "Type '{ @metatable {| __index: {| foo: string |} |}, { } }' could not be converted into '{- foo: number -}'\n"
"caused by:\n"
" Type 'number' could not be converted into 'string'";
"caused by:\n"
" Type 'number' could not be converted into 'string'";
CHECK_EQ(toString(state.errors[0]), expected);
}

View File

@ -7,9 +7,9 @@
#include "doctest.h"
using namespace Luau;
LUAU_FASTFLAG(LuauFunctionReturnStringificationFixup);
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
using namespace Luau;
TEST_SUITE_BEGIN("TypePackTests");
@ -311,7 +311,7 @@ local c: Packed<string, number, boolean>
auto ttvA = get<TableTypeVar>(requireType("a"));
REQUIRE(ttvA);
CHECK_EQ(toString(requireType("a")), "Packed<number>");
if (FFlag::LuauLowerBoundsCalculation)
if (FFlag::LuauFunctionReturnStringificationFixup)
CHECK_EQ(toString(requireType("a"), {true}), "{| f: (number) -> number |}");
else
CHECK_EQ(toString(requireType("a"), {true}), "{| f: (number) -> (number) |}");
@ -966,8 +966,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "detect_cyclic_typepacks2")
TEST_CASE_FIXTURE(Fixture, "unify_variadic_tails_in_arguments")
{
ScopedFastFlag luauCallUnifyPackTails{"LuauCallUnifyPackTails", true};
CheckResult result = check(R"(
function foo(...: string): number
return 1
@ -984,8 +982,6 @@ TEST_CASE_FIXTURE(Fixture, "unify_variadic_tails_in_arguments")
TEST_CASE_FIXTURE(Fixture, "unify_variadic_tails_in_arguments_free")
{
ScopedFastFlag luauCallUnifyPackTails{"LuauCallUnifyPackTails", true};
CheckResult result = check(R"(
function foo<T...>(...: T...): T...
return ...

View File

@ -6,7 +6,6 @@
#include "doctest.h"
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
LUAU_FASTFLAG(LuauSpecialTypesAsterisked)
using namespace Luau;
@ -360,10 +359,7 @@ a.x = 2
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ("Value of type '{| x: number, y: number |}?' could be nil", toString(result.errors[0]));
else
CHECK_EQ("Value of type '({| x: number |} & {| y: number |})?' could be nil", toString(result.errors[0]));
CHECK_EQ("Value of type '({| x: number |} & {| y: number |})?' could be nil", toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "optional_length_error")
@ -532,18 +528,13 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "table_union_write_indirect")
LUAU_REQUIRE_ERROR_COUNT(1, result);
// NOTE: union normalization will improve this message
if (FFlag::LuauLowerBoundsCalculation)
CHECK_EQ(toString(result.errors[0]), "Type '(string) -> number' could not be converted into '(number) -> string'\n"
"caused by:\n"
" Argument #1 type is not compatible. Type 'number' could not be converted into 'string'");
else
CHECK_EQ(toString(result.errors[0]),
R"(Type '(string) -> number' could not be converted into '((number) -> string) | ((number) -> string)'; none of the union options are compatible)");
CHECK_EQ(toString(result.errors[0]),
R"(Type '(string) -> number' could not be converted into '((number) -> string) | ((number) -> string)'; none of the union options are compatible)");
}
TEST_CASE_FIXTURE(Fixture, "union_true_and_false")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -561,7 +552,7 @@ TEST_CASE_FIXTURE(Fixture, "union_true_and_false")
TEST_CASE_FIXTURE(Fixture, "union_of_functions")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -598,7 +589,7 @@ TEST_CASE_FIXTURE(Fixture, "union_of_generic_typepack_functions")
TEST_CASE_FIXTURE(Fixture, "union_of_functions_mentioning_generics")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -612,12 +603,13 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_mentioning_generics")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '(a) -> a?' could not be converted into '((b) -> b) | ((b?) -> nil)'; none of the union options are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '(a) -> a?' could not be converted into '((b) -> b) | ((b?) -> nil)'; none of the union options are compatible");
}
TEST_CASE_FIXTURE(Fixture, "union_of_functions_mentioning_generic_typepacks")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -631,12 +623,13 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_mentioning_generic_typepacks")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '(number, a...) -> (number?, a...)' could not be converted into '((number) -> number) | ((number?, a...) -> (number?, a...))'; none of the union options are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '(number, a...) -> (number?, a...)' could not be converted into '((number) -> number) | ((number?, "
"a...) -> (number?, a...))'; none of the union options are compatible");
}
TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_arg_arities")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -648,12 +641,13 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_arg_arities")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '(number) -> number?' could not be converted into '((number) -> nil) | ((number, string?) -> number)'; none of the union options are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '(number) -> number?' could not be converted into '((number) -> nil) | ((number, string?) -> "
"number)'; none of the union options are compatible");
}
TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_result_arities")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -665,12 +659,13 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_result_arities")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '() -> number | string' could not be converted into '(() -> (string, string)) | (() -> number)'; none of the union options are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '() -> number | string' could not be converted into '(() -> (string, string)) | (() -> number)'; none "
"of the union options are compatible");
}
TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_variadics")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -682,12 +677,13 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_variadics")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '(...nil) -> (...number?)' could not be converted into '((...string?) -> (...number)) | ((...string?) -> nil)'; none of the union options are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '(...nil) -> (...number?)' could not be converted into '((...string?) -> (...number)) | ((...string?) "
"-> nil)'; none of the union options are compatible");
}
TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_arg_variadics")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -699,12 +695,13 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_arg_variadics")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '(number) -> ()' could not be converted into '((...number?) -> ()) | ((number?) -> ())'; none of the union options are compatible");
CHECK_EQ(toString(result.errors[0]),
"Type '(number) -> ()' could not be converted into '((...number?) -> ()) | ((number?) -> ())'; none of the union options are compatible");
}
TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_result_variadics")
{
ScopedFastFlag sffs[] {
ScopedFastFlag sffs[]{
{"LuauSubtypeNormalizer", true},
{"LuauTypeNormalization2", true},
};
@ -716,7 +713,8 @@ TEST_CASE_FIXTURE(Fixture, "union_of_functions_with_mismatching_result_variadics
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]), "Type '() -> (number?, ...number)' could not be converted into '(() -> (...number)) | (() -> number)'; none of the union options are compatible");
CHECK_EQ(toString(result.errors[0]), "Type '() -> (number?, ...number)' could not be converted into '(() -> (...number)) | (() -> number)'; none "
"of the union options are compatible");
}
TEST_SUITE_END();

View File

@ -93,6 +93,7 @@ assert((function() local a = 1 a = a * 2 return a end)() == 2)
assert((function() local a = 1 a = a / 2 return a end)() == 0.5)
assert((function() local a = 5 a = a % 2 return a end)() == 1)
assert((function() local a = 3 a = a ^ 2 return a end)() == 9)
assert((function() local a = 9 a = a ^ 0.5 return a end)() == 3)
assert((function() local a = '1' a = a .. '2' return a end)() == "12")
assert((function() local a = '1' a = a .. '2' .. '3' return a end)() == "123")
@ -475,6 +476,12 @@ assert(rawequal("a", "a") == true)
assert(rawequal("a", "b") == false)
assert((function() a = {} b = {} mt = { __eq = function(l, r) return #l == #r end } setmetatable(a, mt) setmetatable(b, mt) return concat(a == b, rawequal(a, b)) end)() == "true,false")
-- rawequal fallback
assert(concat(pcall(rawequal, "a", "a")) == "true,true")
assert(concat(pcall(rawequal, "a", "b")) == "true,false")
assert(concat(pcall(rawequal, "a", nil)) == "true,false")
assert(pcall(rawequal, "a") == false)
-- metatable ops
local function vec3t(x, y, z)
return setmetatable({x=x, y=y, z=z}, {

View File

@ -71,6 +71,7 @@ for _, b in pairs(c) do
assert(bit32.bxor(b) == b)
assert(bit32.bxor(b, b) == 0)
assert(bit32.bxor(b, 0) == b)
assert(bit32.bxor(b, b, b) == b)
assert(bit32.bnot(b) ~= b)
assert(bit32.bnot(bit32.bnot(b)) == b)
assert(bit32.bnot(b) == 2^32 - 1 - b)
@ -104,6 +105,9 @@ assert(bit32.extract(0xa0001111, 16) == 0)
assert(bit32.extract(0xa0001111, 31) == 1)
assert(bit32.extract(42, 1, 3) == 5)
local pos pos = 1
assert(bit32.extract(42, pos, 3) == 5) -- test bit32.extract builtin instead of bit32.extractk
assert(not pcall(bit32.extract, 0, -1))
assert(not pcall(bit32.extract, 0, 32))
assert(not pcall(bit32.extract, 0, 0, 33))
@ -144,13 +148,17 @@ assert(bit32.lrotate("0x12345678", 4) == 0x23456781)
assert(bit32.rrotate("0x12345678", -4) == 0x23456781)
assert(bit32.arshift("0x12345678", 1) == 0x12345678 / 2)
assert(bit32.arshift("-1", 32) == 0xffffffff)
assert(bit32.arshift("-1", 1) == 0xffffffff)
assert(bit32.bnot("1") == 0xfffffffe)
assert(bit32.band("1", 3) == 1)
assert(bit32.band(1, "3") == 1)
assert(bit32.band(1, 3, "5") == 1)
assert(bit32.bor("1", 2) == 3)
assert(bit32.bor(1, "2") == 3)
assert(bit32.bor(1, 3, "5") == 7)
assert(bit32.bxor("1", 3) == 2)
assert(bit32.bxor(1, "3") == 2)
assert(bit32.bxor(1, 3, "5") == 7)
assert(bit32.btest(1, "3") == true)
assert(bit32.btest("1", 3) == true)
assert(bit32.countlz("42") == 26)

View File

@ -54,4 +54,19 @@ breakpoint(49, false) -- validate that disabling breakpoints works
bar()
local function breakpointSetFromMetamethod()
local a = setmetatable({}, {
__index = function()
breakpoint(67)
return 2
end
})
local b = a.x
assert(b == 2)
end
breakpointSetFromMetamethod()
return 'OK'

View File

@ -4,20 +4,6 @@ print('testing metatables')
local unpack = table.unpack
X = 20; B = 30
local _G = getfenv()
setfenv(1, setmetatable({}, {__index=_G}))
collectgarbage()
X = X+10
assert(X == 30 and _G.X == 20)
B = false
assert(B == false)
B = nil
assert(B == 30)
assert(getmetatable{} == nil)
assert(getmetatable(4) == nil)
assert(getmetatable(nil) == nil)
@ -299,14 +285,8 @@ x = c(3,4,5)
assert(i == 3 and x[1] == 3 and x[3] == 5)
assert(_G.X == 20)
assert(_G == getfenv(0))
print'+'
local _g = _G
setfenv(1, setmetatable({}, {__index=function (_,k) return _g[k] end}))
-- testing proxies
assert(getmetatable(newproxy()) == nil)
assert(getmetatable(newproxy(false)) == nil)
@ -480,4 +460,23 @@ do
end
end
function testfenv()
X = 20; B = 30
local _G = getfenv()
setfenv(1, setmetatable({}, {__index=_G}))
X = X+10
assert(X == 30 and _G.X == 20)
B = false
assert(B == false)
B = nil
assert(B == 30)
assert(_G.X == 20)
assert(_G == getfenv(0))
end
testfenv() -- DONT MOVE THIS LINE
return 'OK'

View File

@ -8,4 +8,13 @@ end
foo()
function bar()
local i = 0
while i < 10 do
i += i + 1
end
end
bar()
return "OK"

View File

@ -152,14 +152,34 @@ assert(eq(a[1000][3], 1000/3, 0.001))
print('+')
do -- testing NaN
local NaN = 10e500 - 10e400
local NaN -- to avoid constant folding
NaN = 10e500 - 10e400
assert(NaN ~= NaN)
assert(not (NaN == NaN))
assert(not (NaN < NaN))
assert(not (NaN <= NaN))
assert(not (NaN > NaN))
assert(not (NaN >= NaN))
assert(not (0 == NaN))
assert(not (0 < NaN))
assert(not (0 <= NaN))
assert(not (0 > NaN))
assert(not (0 >= NaN))
assert(not (NaN == 0))
assert(not (NaN < 0))
assert(not (NaN <= 0))
assert(not (NaN > 0))
assert(not (NaN >= 0))
assert(if NaN < 0 then false else true)
assert(if NaN <= 0 then false else true)
assert(if NaN > 0 then false else true)
assert(if NaN >= 0 then false else true)
local a = {}
assert(not pcall(function () a[NaN] = 1 end))
assert(a[NaN] == nil)
@ -215,6 +235,16 @@ assert(flag);
assert(select(2, pcall(math.random, 1, 2, 3)):match("wrong number of arguments"))
-- min/max
assert(math.min(1) == 1)
assert(math.min(1, 2) == 1)
assert(math.min(1, 2, -1) == -1)
assert(math.min(1, -1, 2) == -1)
assert(math.max(1) == 1)
assert(math.max(1, 2) == 2)
assert(math.max(1, 2, -1) == 2)
assert(math.max(1, -1, 2) == 2)
-- noise
assert(math.noise(0.5) == 0)
assert(math.noise(0.5, 0.5) == -0.25)
@ -277,8 +307,10 @@ assert(math.log("10", 10) == 1)
assert(math.log("9", 3) == 2)
assert(math.max("1", 2) == 2)
assert(math.max(2, "1") == 2)
assert(math.max(1, 2, "3") == 3)
assert(math.min("1", 2) == 1)
assert(math.min(2, "1") == 1)
assert(math.min(1, 2, "3") == 1)
local v,f = math.modf("1.5")
assert(v == 1 and f == 0.5)
assert(math.pow("2", 2) == 4)
@ -295,4 +327,9 @@ assert(math.sign("-2") == -1)
assert(math.sign("0") == 0)
assert(math.round("1.8") == 2)
-- test that fastcalls return correct number of results
assert(select('#', math.floor(1.4)) == 1)
assert(select('#', math.ceil(1.6)) == 1)
assert(select('#', math.sqrt(9)) == 1)
return('OK')

View File

@ -0,0 +1,23 @@
-- This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
print("safeenv reset")
local function envChangeInMetamethod()
-- declare constant so that at O2 this test doesn't interfere with constant folding which we can't deoptimize
local ten
ten = 10
local a = setmetatable({}, {
__index = function()
getfenv().math = { abs = function(n) return n*n end }
return 2
end
})
local b = a.x
assert(math.abs(ten) == 100)
end
envChangeInMetamethod()
return"OK"

View File

@ -21,7 +21,6 @@
#include <sys/sysctl.h>
#endif
#include <random>
#include <optional>
// Indicates if verbose output is enabled; can be overridden via --verbose
@ -31,8 +30,10 @@ bool verbose = false;
// Default optimization level for conformance test; can be overridden via -On
int optimizationLevel = 1;
// Something to seed a pseudorandom number generator with. Defaults to
// something from std::random_device.
// Run conformance tests with native code generation
bool codegen = false;
// Something to seed a pseudorandom number generator with
std::optional<unsigned> randomSeed;
static bool skipFastFlag(const char* flagName)
@ -257,6 +258,11 @@ int main(int argc, char** argv)
verbose = true;
}
if (doctest::parseFlag(argc, argv, "--codegen"))
{
codegen = true;
}
int level = -1;
if (doctest::parseIntOption(argc, argv, "-O", doctest::option_int, level))
{
@ -272,7 +278,7 @@ int main(int argc, char** argv)
if (doctest::parseOption(argc, argv, "--randomize") && !randomSeed)
{
randomSeed = std::random_device()();
randomSeed = unsigned(time(nullptr));
printf("Using RNG seed %u\n", *randomSeed);
}

View File

@ -48,7 +48,6 @@ BuiltinTests.assert_removes_falsy_types2
BuiltinTests.assert_removes_falsy_types_even_from_type_pack_tail_but_only_for_the_first_type
BuiltinTests.assert_returns_false_and_string_iff_it_knows_the_first_argument_cannot_be_truthy
BuiltinTests.bad_select_should_not_crash
BuiltinTests.coroutine_resume_anything_goes
BuiltinTests.coroutine_wrap_anything_goes
BuiltinTests.debug_info_is_crazy
BuiltinTests.debug_traceback_is_crazy
@ -56,7 +55,6 @@ BuiltinTests.dont_add_definitions_to_persistent_types
BuiltinTests.find_capture_types
BuiltinTests.find_capture_types2
BuiltinTests.find_capture_types3
BuiltinTests.global_singleton_types_are_sealed
BuiltinTests.gmatch_capture_types
BuiltinTests.gmatch_capture_types2
BuiltinTests.gmatch_capture_types_balanced_escaped_parens
@ -69,7 +67,6 @@ BuiltinTests.match_capture_types
BuiltinTests.match_capture_types2
BuiltinTests.math_max_checks_for_numbers
BuiltinTests.next_iterator_should_infer_types_and_type_check
BuiltinTests.os_time_takes_optional_date_table
BuiltinTests.pairs_iterator_should_infer_types_and_type_check
BuiltinTests.see_thru_select
BuiltinTests.select_slightly_out_of_range
@ -77,7 +74,6 @@ BuiltinTests.select_way_out_of_range
BuiltinTests.select_with_decimal_argument_is_rounded_down
BuiltinTests.set_metatable_needs_arguments
BuiltinTests.setmetatable_should_not_mutate_persisted_types
BuiltinTests.sort
BuiltinTests.sort_with_bad_predicate
BuiltinTests.sort_with_predicate
BuiltinTests.string_format_arg_count_mismatch
@ -88,8 +84,6 @@ BuiltinTests.string_format_report_all_type_errors_at_correct_positions
BuiltinTests.string_format_use_correct_argument
BuiltinTests.string_format_use_correct_argument2
BuiltinTests.string_format_use_correct_argument3
BuiltinTests.string_lib_self_noself
BuiltinTests.table_concat_returns_string
BuiltinTests.table_freeze_is_generic
BuiltinTests.table_insert_correctly_infers_type_of_array_2_args_overload
BuiltinTests.table_insert_correctly_infers_type_of_array_3_args_overload
@ -101,12 +95,10 @@ BuiltinTests.tonumber_returns_optional_number_type2
DefinitionTests.class_definition_overload_metamethods
DefinitionTests.declaring_generic_functions
DefinitionTests.definition_file_classes
FrontendTest.automatically_check_dependent_scripts
FrontendTest.environments
FrontendTest.imported_table_modification_2
FrontendTest.it_should_be_safe_to_stringify_errors_when_full_type_graph_is_discarded
FrontendTest.nocheck_cycle_used_by_checked
FrontendTest.recheck_if_dependent_script_is_dirty
FrontendTest.reexport_cyclic_type
FrontendTest.reexport_type_alias
FrontendTest.trace_requires_in_nonstrict_mode
@ -132,18 +124,15 @@ GenericsTests.generic_type_pack_parentheses
GenericsTests.generic_type_pack_unification1
GenericsTests.generic_type_pack_unification2
GenericsTests.generic_type_pack_unification3
GenericsTests.higher_rank_polymorphism_should_not_accept_instantiated_arguments
GenericsTests.infer_generic_function_function_argument
GenericsTests.infer_generic_function_function_argument_overloaded
GenericsTests.infer_generic_methods
GenericsTests.inferred_local_vars_can_be_polytypes
GenericsTests.instantiate_cyclic_generic_function
GenericsTests.instantiate_generic_function_in_assignments
GenericsTests.instantiate_generic_function_in_assignments2
GenericsTests.instantiated_function_argument_names
GenericsTests.instantiation_sharing_types
GenericsTests.local_vars_can_be_instantiated_polytypes
GenericsTests.no_stack_overflow_from_quantifying
GenericsTests.properties_can_be_instantiated_polytypes
GenericsTests.reject_clashing_generic_and_pack_names
GenericsTests.self_recursive_instantiated_param
IntersectionTypes.index_on_an_intersection_type_with_mixed_types
@ -155,8 +144,6 @@ IntersectionTypes.should_still_pick_an_overload_whose_arguments_are_unions
IntersectionTypes.table_intersection_write_sealed
IntersectionTypes.table_intersection_write_sealed_indirect
IntersectionTypes.table_write_sealed_indirect
isSubtype.intersection_of_tables
isSubtype.table_with_table_prop
ModuleTests.any_persistance_does_not_leak
ModuleTests.clone_self_property
ModuleTests.deepClone_cyclic_table
@ -172,51 +159,24 @@ NonstrictModeTests.local_tables_are_not_any
NonstrictModeTests.locals_are_any_by_default
NonstrictModeTests.offer_a_hint_if_you_use_a_dot_instead_of_a_colon
NonstrictModeTests.parameters_having_type_any_are_optional
NonstrictModeTests.returning_insufficient_return_values
NonstrictModeTests.returning_too_many_values
NonstrictModeTests.table_dot_insert_and_recursive_calls
NonstrictModeTests.table_props_are_any
Normalize.any_wins_the_battle_over_unknown_in_unions
Normalize.constrained_intersection_of_intersections
Normalize.cyclic_intersection
Normalize.cyclic_table_normalizes_sensibly
Normalize.cyclic_union
Normalize.fuzz_failure_bound_type_is_normal_but_not_its_bounded_to
Normalize.intersection_combine_on_bound_self
Normalize.intersection_inside_a_table_inside_another_intersection
Normalize.intersection_inside_a_table_inside_another_intersection_2
Normalize.intersection_inside_a_table_inside_another_intersection_3
Normalize.intersection_inside_a_table_inside_another_intersection_4
Normalize.intersection_of_confluent_overlapping_tables
Normalize.intersection_of_disjoint_tables
Normalize.intersection_of_functions
Normalize.intersection_of_overlapping_tables
Normalize.intersection_of_tables_with_indexers
Normalize.normalization_does_not_convert_ever
Normalize.normalize_module_return_type
Normalize.normalize_unions_containing_never
Normalize.normalize_unions_containing_unknown
Normalize.union_of_distinct_free_types
Normalize.variadic_tail_is_marked_normal
Normalize.visiting_a_type_twice_is_not_considered_normal
ParseErrorRecovery.generic_type_list_recovery
ParseErrorRecovery.recovery_of_parenthesized_expressions
ParserTests.parse_nesting_based_end_detection_failsafe_earlier
ParserTests.parse_nesting_based_end_detection_local_function
ProvisionalTests.bail_early_if_unification_is_too_complicated
ProvisionalTests.choose_the_right_overload_for_pcall
ProvisionalTests.constrained_is_level_dependent
ProvisionalTests.discriminate_from_x_not_equal_to_nil
ProvisionalTests.do_not_ice_when_trying_to_pick_first_of_generic_type_pack
ProvisionalTests.error_on_eq_metamethod_returning_a_type_other_than_boolean
ProvisionalTests.function_returns_many_things_but_first_of_it_is_forgotten
ProvisionalTests.generic_type_leak_to_module_interface_variadic
ProvisionalTests.greedy_inference_with_shared_self_triggers_function_with_no_returns
ProvisionalTests.invariant_table_properties_means_instantiating_tables_in_call_is_unsound
ProvisionalTests.it_should_be_agnostic_of_actual_size
ProvisionalTests.lower_bounds_calculation_is_too_permissive_with_overloaded_higher_order_functions
ProvisionalTests.normalization_fails_on_certain_kinds_of_cyclic_tables
ProvisionalTests.pcall_returns_at_least_two_value_but_function_returns_nothing
ProvisionalTests.setmetatable_constrains_free_type_into_free_table
ProvisionalTests.specialization_binds_with_prototypes_too_early
ProvisionalTests.table_insert_with_a_singleton_argument
ProvisionalTests.typeguard_inference_incomplete
ProvisionalTests.weirditer_should_not_loop_forever
ProvisionalTests.while_body_are_also_refined
@ -225,7 +185,6 @@ RefinementTest.apply_refinements_on_astexprindexexpr_whose_subscript_expr_is_con
RefinementTest.assert_a_to_be_truthy_then_assert_a_to_be_number
RefinementTest.assert_non_binary_expressions_actually_resolve_constraints
RefinementTest.call_a_more_specific_function_using_typeguard
RefinementTest.correctly_lookup_a_shadowed_local_that_which_was_previously_refined
RefinementTest.correctly_lookup_property_whose_base_was_previously_refined
RefinementTest.correctly_lookup_property_whose_base_was_previously_refined2
RefinementTest.discriminate_from_isa_of_x
@ -311,6 +270,7 @@ TableTests.found_like_key_in_table_function_call
TableTests.found_like_key_in_table_property_access
TableTests.found_multiple_like_keys
TableTests.function_calls_produces_sealed_table_given_unsealed_table
TableTests.generic_table_instantiation_potential_regression
TableTests.getmetatable_returns_pointer_to_metatable
TableTests.give_up_after_one_metatable_index_look_up
TableTests.hide_table_error_properties
@ -323,6 +283,7 @@ TableTests.infer_indexer_from_value_property_in_literal
TableTests.inferred_return_type_of_free_table
TableTests.inferring_crazy_table_should_also_be_quick
TableTests.instantiate_table_cloning_3
TableTests.invariant_table_properties_means_instantiating_tables_in_call_is_unsound
TableTests.leaking_bad_metatable_errors
TableTests.length_operator_union_errors
TableTests.less_exponential_blowup_please
@ -340,7 +301,6 @@ TableTests.oop_polymorphic
TableTests.open_table_unification_2
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table_2
TableTests.pass_incompatible_union_to_a_generic_table_without_crashing
TableTests.persistent_sealed_table_is_immutable
TableTests.prop_access_on_key_whose_types_mismatches
TableTests.property_lookup_through_tabletypevar_metatable
@ -378,7 +338,6 @@ ToDot.function
ToDot.table
ToString.exhaustive_toString_of_cyclic_table
ToString.function_type_with_argument_names_generic
ToString.no_parentheses_around_cyclic_function_type_in_union
ToString.toStringDetailed2
ToString.toStringErrorPack
ToString.toStringNamedFunction_generic_pack
@ -412,12 +371,12 @@ TypeAliases.type_alias_local_rename
TypeAliases.type_alias_of_an_imported_recursive_generic_type
TypeAliases.type_alias_of_an_imported_recursive_type
TypeInfer.checking_should_not_ice
TypeInfer.cli_50041_committing_txnlog_in_apollo_client_error
TypeInfer.dont_report_type_errors_within_an_AstExprError
TypeInfer.dont_report_type_errors_within_an_AstStatError
TypeInfer.globals
TypeInfer.globals2
TypeInfer.infer_assignment_value_types_mutable_lval
TypeInfer.it_is_ok_to_have_inconsistent_number_of_return_values_in_nonstrict
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.tc_after_error_recovery_no_replacement_name_in_error
TypeInfer.tc_if_else_expressions_expected_type_3
@ -427,7 +386,7 @@ TypeInfer.tc_interpolated_string_with_invalid_expression
TypeInfer.type_infer_recursion_limit_no_ice
TypeInferAnyError.assign_prop_to_table_by_calling_any_yields_any
TypeInferAnyError.for_in_loop_iterator_is_any2
TypeInferAnyError.union_of_types_regression_test
TypeInferAnyError.for_in_loop_iterator_is_error2
TypeInferClasses.call_base_method
TypeInferClasses.call_instance_method
TypeInferClasses.can_assign_to_prop_of_base_class_using_string
@ -441,7 +400,6 @@ TypeInferClasses.optional_class_field_access_error
TypeInferClasses.table_class_unification_reports_sane_errors_for_missing_properties
TypeInferClasses.warn_when_prop_almost_matches
TypeInferClasses.we_can_report_when_someone_is_trying_to_use_a_table_rather_than_a_class
TypeInferFunctions.call_o_with_another_argument_after_foo_was_quantified
TypeInferFunctions.calling_function_with_anytypepack_doesnt_leak_free_types
TypeInferFunctions.calling_function_with_incorrect_argument_type_yields_errors_spanning_argument
TypeInferFunctions.dont_give_other_overloads_message_if_only_one_argument_matching_overload_exists
@ -454,26 +412,20 @@ TypeInferFunctions.function_decl_non_self_sealed_overwrite_2
TypeInferFunctions.function_decl_non_self_unsealed_overwrite
TypeInferFunctions.function_does_not_return_enough_values
TypeInferFunctions.function_statement_sealed_table_assignment_through_indexer
TypeInferFunctions.ignored_return_values
TypeInferFunctions.improved_function_arg_mismatch_error_nonstrict
TypeInferFunctions.improved_function_arg_mismatch_errors
TypeInferFunctions.inconsistent_higher_order_function
TypeInferFunctions.inconsistent_return_types
TypeInferFunctions.infer_anonymous_function_arguments
TypeInferFunctions.infer_return_type_from_selected_overload
TypeInferFunctions.infer_return_value_type
TypeInferFunctions.infer_that_function_does_not_return_a_table
TypeInferFunctions.it_is_ok_not_to_supply_enough_retvals
TypeInferFunctions.list_all_overloads_if_no_overload_takes_given_argument_count
TypeInferFunctions.list_only_alternative_overloads_that_match_argument_count
TypeInferFunctions.no_lossy_function_type
TypeInferFunctions.occurs_check_failure_in_function_return_type
TypeInferFunctions.quantify_constrained_types
TypeInferFunctions.record_matching_overload
TypeInferFunctions.report_exiting_without_return_nonstrict
TypeInferFunctions.report_exiting_without_return_strict
TypeInferFunctions.return_type_by_overload
TypeInferFunctions.strict_mode_ok_with_missing_arguments
TypeInferFunctions.too_few_arguments_variadic
TypeInferFunctions.too_few_arguments_variadic_generic
TypeInferFunctions.too_few_arguments_variadic_generic2
@ -489,14 +441,13 @@ TypeInferLoops.for_in_with_generic_next
TypeInferLoops.for_in_with_just_one_iterator_is_ok
TypeInferLoops.loop_iter_no_indexer_nonstrict
TypeInferLoops.loop_iter_trailing_nil
TypeInferLoops.loop_typecheck_crash_on_empty_optional
TypeInferLoops.unreachable_code_after_infinite_loop
TypeInferLoops.varlist_declared_by_for_in_loop_should_be_free
TypeInferModules.bound_free_table_export_is_ok
TypeInferModules.custom_require_global
TypeInferModules.do_not_modify_imported_types
TypeInferModules.do_not_modify_imported_types_2
TypeInferModules.do_not_modify_imported_types_3
TypeInferModules.general_require_type_mismatch
TypeInferModules.module_type_conflict
TypeInferModules.module_type_conflict_instantiated
TypeInferModules.require_a_variadic_function
@ -531,7 +482,6 @@ TypeInferOperators.expected_types_through_binary_or
TypeInferOperators.infer_any_in_all_modes_when_lhs_is_unknown
TypeInferOperators.or_joins_types
TypeInferOperators.or_joins_types_with_no_extras
TypeInferOperators.primitive_arith_no_metatable
TypeInferOperators.primitive_arith_possible_metatable
TypeInferOperators.produce_the_correct_error_message_when_comparing_a_table_with_a_metatable_with_one_that_does_not
TypeInferOperators.refine_and_or
@ -543,7 +493,6 @@ TypeInferOperators.typecheck_overloaded_multiply_that_is_an_intersection_on_rhs
TypeInferOperators.typecheck_unary_len_error
TypeInferOperators.typecheck_unary_minus
TypeInferOperators.typecheck_unary_minus_error
TypeInferOperators.unary_not_is_boolean
TypeInferOperators.UnknownGlobalCompoundAssign
TypeInferPrimitives.CheckMethodsOfNumber
TypeInferPrimitives.singleton_types
@ -563,8 +512,6 @@ TypeInferUnknownNever.type_packs_containing_never_is_itself_uninhabitable
TypeInferUnknownNever.type_packs_containing_never_is_itself_uninhabitable2
TypeInferUnknownNever.unary_minus_of_never
TypePackTests.higher_order_function
TypePackTests.multiple_varargs_inference_are_not_confused
TypePackTests.no_return_size_should_be_zero
TypePackTests.pack_tail_unification_check
TypePackTests.parenthesized_varargs_returns_any
TypePackTests.type_alias_backwards_compatible
@ -606,7 +553,6 @@ TypeSingletons.string_singleton_subtype
TypeSingletons.string_singletons
TypeSingletons.string_singletons_escape_chars
TypeSingletons.string_singletons_mismatch
TypeSingletons.table_insert_with_a_singleton_argument
TypeSingletons.table_properties_type_error_escapes
TypeSingletons.tagged_unions_using_singletons
TypeSingletons.taking_the_length_of_string_singleton
@ -616,14 +562,12 @@ TypeSingletons.widening_happens_almost_everywhere
TypeSingletons.widening_happens_almost_everywhere_except_for_tables
UnionTypes.error_detailed_optional
UnionTypes.error_detailed_union_all
UnionTypes.error_takes_optional_arguments
UnionTypes.index_on_a_union_type_with_missing_property
UnionTypes.index_on_a_union_type_with_mixed_types
UnionTypes.index_on_a_union_type_with_one_optional_property
UnionTypes.index_on_a_union_type_with_one_property_of_type_any
UnionTypes.index_on_a_union_type_with_property_guaranteed_to_exist
UnionTypes.index_on_a_union_type_works_at_arbitrary_depth
UnionTypes.optional_arguments
UnionTypes.optional_assignment_errors
UnionTypes.optional_call_error
UnionTypes.optional_field_access_error

View File

@ -43,16 +43,23 @@ for line in input:
if match:
inst = match[1]
signature = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)"
signature = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, StkId base, TValue* k)"
header += signature + ";\n"
function = signature + "\n"
function += "{\n"
function += " [[maybe_unused]] Closure* cl = clvalue(L->ci->func);\n"
state = 1
# find the end of an instruction
# first line of the instruction which is "{"
elif state == 1:
assert(line == " {\n")
state = 2
# find the end of an instruction
elif state == 2:
# remove jumps back into the native code
if line == "#if LUA_CUSTOM_EXECUTION\n":
state = 2
state = 3
continue
if line[0] == ' ':
@ -70,7 +77,7 @@ for line in input:
if match:
# break is not supported
if inst == "LOP_BREAK":
function = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)\n"
function = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, StkId base, TValue* k)\n"
function += "{\n LUAU_ASSERT(!\"Unsupported deprecated opcode\");\n LUAU_UNREACHABLE();\n}\n"
# handle fallthrough
elif inst == "LOP_NAMECALL":
@ -81,14 +88,14 @@ for line in input:
state = 0
# skip LUA_CUSTOM_EXECUTION code blocks
elif state == 2:
elif state == 3:
if line == "#endif\n":
state = 3
state = 4
continue
# skip extra line
elif state == 3:
state = 1
elif state == 4:
state = 2
# make sure we found the ending
assert(state == 0)

View File

@ -5,6 +5,9 @@ import os.path
import subprocess as sp
import sys
import xml.sax as x
import colorama as c
c.init()
SCRIPT_PATH = os.path.split(sys.argv[0])[0]
FAIL_LIST_PATH = os.path.join(SCRIPT_PATH, "faillist.txt")
@ -35,6 +38,10 @@ class Handler(x.ContentHandler):
self.numSkippedTests = 0
self.pass_count = 0
self.fail_count = 0
self.test_count = 0
def startElement(self, name, attrs):
if name == "TestSuite":
self.currentTest.append(attrs["name"])
@ -53,6 +60,12 @@ class Handler(x.ContentHandler):
r = self.results.get(dottedName, True)
self.results[dottedName] = r and passed
self.test_count += 1
if passed:
self.pass_count += 1
else:
self.fail_count += 1
elif name == "OverallResultsTestCases":
self.numSkippedTests = safeParseInt(attrs.get("skipped", 0))
@ -137,11 +150,33 @@ def main():
p.wait()
unexpected_fails = 0
unexpected_passes = 0
for testName, passed in handler.results.items():
if passed and testName in failList:
print_stderr(f"UNEXPECTED: {testName} should have failed")
unexpected_passes += 1
print_stderr(
f"UNEXPECTED: {c.Fore.RED}{testName}{c.Fore.RESET} should have failed"
)
elif not passed and testName not in failList:
print_stderr(f"UNEXPECTED: {testName} should have passed")
unexpected_fails += 1
print_stderr(
f"UNEXPECTED: {c.Fore.GREEN}{testName}{c.Fore.RESET} should have passed"
)
if unexpected_fails or unexpected_passes:
print_stderr("")
print_stderr(f"Unexpected fails: {unexpected_fails}")
print_stderr(f"Unexpected passes: {unexpected_passes}")
pass_percent = int(handler.pass_count / handler.test_count * 100)
print_stderr("")
print_stderr(
f"{handler.pass_count} of {handler.test_count} tests passed. ({pass_percent}%)"
)
print_stderr(f"{handler.fail_count} tests failed.")
if args.write:
newFailList = sorted(