Sync to upstream/release/582 (#960)

* Optimized operations like instantiation and module export for very
large types

In our new typechecker:
* Typechecking of function calls was rewritten to handle more cases
correctly
* Fixed a crash that can happen after self-referential type is exported
from a module
* Fixed a false positive error in string comparison
* Added handling of `for...in` variable type annotations and fixed
issues with the iterator call inside
* Self-referential 'hasProp' and 'setProp' constraints are now handled
correctly
 
In our native code generation (jit):
* Added '--target' argument to luau-compile to test multiple
architectures different from host architecture
* GC barrier tag check is skipped if type is already known to be
GC-collectable
* Added GET_TYPE/GET_TYPEOF instructions for type/typeof fast-calls
* Improved code size of interrupt handlers on X64
This commit is contained in:
vegorov-rbx 2023-06-23 23:19:39 -07:00 committed by GitHub
parent d458d240cd
commit 76bea81a7b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
63 changed files with 1487 additions and 1028 deletions

View File

@ -83,7 +83,7 @@ struct IterableConstraint
TypePackId variables; TypePackId variables;
const AstNode* nextAstFragment; const AstNode* nextAstFragment;
DenseHashMap<const AstNode*, TypeId>* astOverloadResolvedTypes; DenseHashMap<const AstNode*, TypeId>* astForInNextTypes;
}; };
// name(namedType) = name // name(namedType) = name

View File

@ -245,6 +245,17 @@ private:
template <typename TID> template <typename TID>
bool tryUnify(NotNull<const Constraint> constraint, TID subTy, TID superTy); bool tryUnify(NotNull<const Constraint> constraint, TID subTy, TID superTy);
/**
* Bind a BlockedType to another type while taking care not to bind it to
* itself in the case that resultTy == blockedTy. This can happen if we
* have a tautological constraint. When it does, we must instead bind
* blockedTy to a fresh type belonging to an appropriate scope.
*
* To determine which scope is appropriate, we also accept rootTy, which is
* to be the type that contains blockedTy.
*/
void bindBlockedType(TypeId blockedTy, TypeId resultTy, TypeId rootTy, Location location);
/** /**
* Marks a constraint as being blocked on a type or type pack. The constraint * Marks a constraint as being blocked on a type or type pack. The constraint
* solver will not attempt to dispatch blocked constraints until their * solver will not attempt to dispatch blocked constraints until their

View File

@ -81,13 +81,29 @@ struct Module
DenseHashMap<const AstExpr*, TypePackId> astTypePacks{nullptr}; DenseHashMap<const AstExpr*, TypePackId> astTypePacks{nullptr};
DenseHashMap<const AstExpr*, TypeId> astExpectedTypes{nullptr}; DenseHashMap<const AstExpr*, TypeId> astExpectedTypes{nullptr};
// For AST nodes that are function calls, this map provides the
// unspecialized type of the function that was called. If a function call
// resolves to a __call metamethod application, this map will point at that
// metamethod.
//
// This is useful for type checking and Signature Help.
DenseHashMap<const AstNode*, TypeId> astOriginalCallTypes{nullptr}; DenseHashMap<const AstNode*, TypeId> astOriginalCallTypes{nullptr};
// The specialization of a function that was selected. If the function is
// generic, those generic type parameters will be replaced with the actual
// types that were passed. If the function is an overload, this map will
// point at the specific overloads that were selected.
DenseHashMap<const AstNode*, TypeId> astOverloadResolvedTypes{nullptr}; DenseHashMap<const AstNode*, TypeId> astOverloadResolvedTypes{nullptr};
// Only used with for...in loops. The computed type of the next() function
// is kept here for type checking.
DenseHashMap<const AstNode*, TypeId> astForInNextTypes{nullptr};
DenseHashMap<const AstType*, TypeId> astResolvedTypes{nullptr}; DenseHashMap<const AstType*, TypeId> astResolvedTypes{nullptr};
DenseHashMap<const AstTypePack*, TypePackId> astResolvedTypePacks{nullptr}; DenseHashMap<const AstTypePack*, TypePackId> astResolvedTypePacks{nullptr};
// Map AST nodes to the scope they create. Cannot be NotNull<Scope> because we need a sentinel value for the map. // Map AST nodes to the scope they create. Cannot be NotNull<Scope> because
// we need a sentinel value for the map.
DenseHashMap<const AstNode*, Scope*> astScopes{nullptr}; DenseHashMap<const AstNode*, Scope*> astScopes{nullptr};
std::unordered_map<Name, TypeId> declaredGlobals; std::unordered_map<Name, TypeId> declaredGlobals;
@ -103,8 +119,9 @@ struct Module
bool hasModuleScope() const; bool hasModuleScope() const;
ScopePtr getModuleScope() const; ScopePtr getModuleScope() const;
// Once a module has been typechecked, we clone its public interface into a separate arena. // Once a module has been typechecked, we clone its public interface into a
// This helps us to force Type ownership into a DAG rather than a DCG. // separate arena. This helps us to force Type ownership into a DAG rather
// than a DCG.
void clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalErrorReporter& ice); void clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalErrorReporter& ice);
}; };

View File

@ -207,8 +207,6 @@ struct NormalizedFunctionType
struct NormalizedType; struct NormalizedType;
using NormalizedTyvars = std::unordered_map<TypeId, std::unique_ptr<NormalizedType>>; using NormalizedTyvars = std::unordered_map<TypeId, std::unique_ptr<NormalizedType>>;
bool isInhabited_DEPRECATED(const NormalizedType& norm);
// A normalized type is either any, unknown, or one of the form P | T | F | G where // A normalized type is either any, unknown, or one of the form P | T | F | G where
// * P is a union of primitive types (including singletons, classes and the error type) // * P is a union of primitive types (including singletons, classes and the error type)
// * T is a union of table types // * T is a union of table types

View File

@ -69,6 +69,19 @@ struct TarjanWorklistVertex
int lastEdge; int lastEdge;
}; };
struct TarjanNode
{
TypeId ty;
TypePackId tp;
bool onStack;
bool dirty;
// Tarjan calculates the lowlink for each vertex,
// which is the lowest ancestor index reachable from the vertex.
int lowlink;
};
// Tarjan's algorithm for finding the SCCs in a cyclic structure. // Tarjan's algorithm for finding the SCCs in a cyclic structure.
// https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm // https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
struct Tarjan struct Tarjan
@ -76,17 +89,12 @@ struct Tarjan
// Vertices (types and type packs) are indexed, using pre-order traversal. // Vertices (types and type packs) are indexed, using pre-order traversal.
DenseHashMap<TypeId, int> typeToIndex{nullptr}; DenseHashMap<TypeId, int> typeToIndex{nullptr};
DenseHashMap<TypePackId, int> packToIndex{nullptr}; DenseHashMap<TypePackId, int> packToIndex{nullptr};
std::vector<TypeId> indexToType;
std::vector<TypePackId> indexToPack; std::vector<TarjanNode> nodes;
// Tarjan keeps a stack of vertices where we're still in the process // Tarjan keeps a stack of vertices where we're still in the process
// of finding their SCC. // of finding their SCC.
std::vector<int> stack; std::vector<int> stack;
std::vector<bool> onStack;
// Tarjan calculates the lowlink for each vertex,
// which is the lowest ancestor index reachable from the vertex.
std::vector<int> lowlink;
int childCount = 0; int childCount = 0;
int childLimit = 0; int childLimit = 0;
@ -98,6 +106,7 @@ struct Tarjan
std::vector<TypeId> edgesTy; std::vector<TypeId> edgesTy;
std::vector<TypePackId> edgesTp; std::vector<TypePackId> edgesTp;
std::vector<TarjanWorklistVertex> worklist; std::vector<TarjanWorklistVertex> worklist;
// This is hot code, so we optimize recursion to a stack. // This is hot code, so we optimize recursion to a stack.
TarjanResult loop(); TarjanResult loop();
@ -124,10 +133,22 @@ struct Tarjan
TarjanResult visitRoot(TypeId ty); TarjanResult visitRoot(TypeId ty);
TarjanResult visitRoot(TypePackId ty); TarjanResult visitRoot(TypePackId ty);
// Each subclass gets called back once for each edge, void clearTarjan();
// and once for each SCC.
virtual void visitEdge(int index, int parentIndex) {} // Get/set the dirty bit for an index (grows the vector if needed)
virtual void visitSCC(int index) {} bool getDirty(int index);
void setDirty(int index, bool d);
// Find all the dirty vertices reachable from `t`.
TarjanResult findDirty(TypeId t);
TarjanResult findDirty(TypePackId t);
// We find dirty vertices using Tarjan
void visitEdge(int index, int parentIndex);
void visitSCC(int index);
TarjanResult loop_DEPRECATED();
void visitSCC_DEPRECATED(int index);
// Each subclass can decide to ignore some nodes. // Each subclass can decide to ignore some nodes.
virtual bool ignoreChildren(TypeId ty) virtual bool ignoreChildren(TypeId ty)
@ -150,27 +171,6 @@ struct Tarjan
{ {
return ignoreChildren(ty); return ignoreChildren(ty);
} }
};
// We use Tarjan to calculate dirty bits. We set `dirty[i]` true
// if the vertex with index `i` can reach a dirty vertex.
struct FindDirty : Tarjan
{
std::vector<bool> dirty;
void clearTarjan();
// Get/set the dirty bit for an index (grows the vector if needed)
bool getDirty(int index);
void setDirty(int index, bool d);
// Find all the dirty vertices reachable from `t`.
TarjanResult findDirty(TypeId t);
TarjanResult findDirty(TypePackId t);
// We find dirty vertices using Tarjan
void visitEdge(int index, int parentIndex) override;
void visitSCC(int index) override;
// Subclasses should say which vertices are dirty, // Subclasses should say which vertices are dirty,
// and what to do with dirty vertices. // and what to do with dirty vertices.
@ -178,11 +178,18 @@ struct FindDirty : Tarjan
virtual bool isDirty(TypePackId tp) = 0; virtual bool isDirty(TypePackId tp) = 0;
virtual void foundDirty(TypeId ty) = 0; virtual void foundDirty(TypeId ty) = 0;
virtual void foundDirty(TypePackId tp) = 0; virtual void foundDirty(TypePackId tp) = 0;
// TODO: remove with FFlagLuauTarjanSingleArr
std::vector<TypeId> indexToType;
std::vector<TypePackId> indexToPack;
std::vector<bool> onStack;
std::vector<int> lowlink;
std::vector<bool> dirty;
}; };
// And finally substitution, which finds all the reachable dirty vertices // And finally substitution, which finds all the reachable dirty vertices
// and replaces them with clean ones. // and replaces them with clean ones.
struct Substitution : FindDirty struct Substitution : Tarjan
{ {
protected: protected:
Substitution(const TxnLog* log_, TypeArena* arena) Substitution(const TxnLog* log_, TypeArena* arena)

View File

@ -19,8 +19,6 @@
#include <unordered_map> #include <unordered_map>
#include <unordered_set> #include <unordered_set>
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
namespace Luau namespace Luau
{ {

View File

@ -64,4 +64,13 @@ const T* get(std::optional<Ty> ty)
return nullptr; return nullptr;
} }
template<typename Ty>
std::optional<Ty> follow(std::optional<Ty> ty)
{
if (ty)
return follow(*ty);
else
return std::nullopt;
}
} // namespace Luau } // namespace Luau

View File

@ -6,8 +6,6 @@
#include "Luau/Normalize.h" #include "Luau/Normalize.h"
#include "Luau/TxnLog.h" #include "Luau/TxnLog.h"
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
namespace Luau namespace Luau
{ {
@ -78,7 +76,7 @@ TypePackId Anyification::clean(TypePackId tp)
bool Anyification::ignoreChildren(TypeId ty) bool Anyification::ignoreChildren(TypeId ty)
{ {
if (FFlag::LuauClassTypeVarsInSubstitution && get<ClassType>(ty)) if (get<ClassType>(ty))
return true; return true;
return ty->persistent; return ty->persistent;

View File

@ -2,8 +2,6 @@
#include "Luau/ApplyTypeFunction.h" #include "Luau/ApplyTypeFunction.h"
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
namespace Luau namespace Luau
{ {
@ -33,7 +31,7 @@ bool ApplyTypeFunction::ignoreChildren(TypeId ty)
{ {
if (get<GenericType>(ty)) if (get<GenericType>(ty))
return true; return true;
else if (FFlag::LuauClassTypeVarsInSubstitution && get<ClassType>(ty)) else if (get<ClassType>(ty))
return true; return true;
else else
return false; return false;

View File

@ -751,7 +751,12 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatForIn* f
variableTypes.reserve(forIn->vars.size); variableTypes.reserve(forIn->vars.size);
for (AstLocal* var : forIn->vars) for (AstLocal* var : forIn->vars)
{ {
TypeId ty = freshType(loopScope); TypeId ty = nullptr;
if (var->annotation)
ty = resolveType(loopScope, var->annotation, /*inTypeArguments*/ false);
else
ty = freshType(loopScope);
loopScope->bindings[var] = Binding{ty, var->location}; loopScope->bindings[var] = Binding{ty, var->location};
variableTypes.push_back(ty); variableTypes.push_back(ty);
@ -763,7 +768,7 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatForIn* f
TypePackId variablePack = arena->addTypePack(std::move(variableTypes), arena->addTypePack(FreeTypePack{loopScope.get()})); TypePackId variablePack = arena->addTypePack(std::move(variableTypes), arena->addTypePack(FreeTypePack{loopScope.get()}));
addConstraint( addConstraint(
loopScope, getLocation(forIn->values), IterableConstraint{iterator, variablePack, forIn->values.data[0], &module->astOverloadResolvedTypes}); loopScope, getLocation(forIn->values), IterableConstraint{iterator, variablePack, forIn->values.data[0], &module->astForInNextTypes});
visit(loopScope, forIn->body); visit(loopScope, forIn->body);
return ControlFlow::None; return ControlFlow::None;

View File

@ -1453,7 +1453,7 @@ bool ConstraintSolver::tryDispatch(const HasPropConstraint& c, NotNull<const Con
return false; return false;
} }
asMutable(c.resultType)->ty.emplace<BoundType>(result.value_or(builtinTypes->anyType)); bindBlockedType(c.resultType, result.value_or(builtinTypes->anyType), c.subjectType, constraint->location);
unblock(c.resultType, constraint->location); unblock(c.resultType, constraint->location);
return true; return true;
} }
@ -1559,8 +1559,8 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
existingPropType = result; existingPropType = result;
} }
auto bind = [](TypeId a, TypeId b) { auto bind = [&](TypeId a, TypeId b) {
asMutable(a)->ty.emplace<BoundType>(b); bindBlockedType(a, b, c.subjectType, constraint->location);
}; };
if (existingPropType) if (existingPropType)
@ -2143,7 +2143,9 @@ bool ConstraintSolver::tryDispatchIterableFunction(
// if there are no errors from unifying the two, we can pass forward the expected type as our selected resolution. // if there are no errors from unifying the two, we can pass forward the expected type as our selected resolution.
if (errors.empty()) if (errors.empty())
(*c.astOverloadResolvedTypes)[c.nextAstFragment] = expectedNextTy; {
(*c.astForInNextTypes)[c.nextAstFragment] = expectedNextTy;
}
auto it = begin(nextRetPack); auto it = begin(nextRetPack);
std::vector<TypeId> modifiedNextRetHead; std::vector<TypeId> modifiedNextRetHead;
@ -2380,6 +2382,31 @@ bool ConstraintSolver::tryUnify(NotNull<const Constraint> constraint, TID subTy,
return true; return true;
} }
void ConstraintSolver::bindBlockedType(TypeId blockedTy, TypeId resultTy, TypeId rootTy, Location location)
{
resultTy = follow(resultTy);
LUAU_ASSERT(get<BlockedType>(blockedTy));
if (blockedTy == resultTy)
{
rootTy = follow(rootTy);
Scope* freeScope = nullptr;
if (auto ft = get<FreeType>(rootTy))
freeScope = ft->scope;
else if (auto tt = get<TableType>(rootTy); tt && tt->state == TableState::Free)
freeScope = tt->scope;
else
iceReporter.ice("bindBlockedType couldn't find an appropriate scope for a fresh type!", location);
LUAU_ASSERT(freeScope);
asMutable(blockedTy)->ty.emplace<BoundType>(arena->freshType(freeScope));
}
else
asMutable(blockedTy)->ty.emplace<BoundType>(resultTy);
}
void ConstraintSolver::block_(BlockedConstraintId target, NotNull<const Constraint> constraint) void ConstraintSolver::block_(BlockedConstraintId target, NotNull<const Constraint> constraint)
{ {
blocked[target].push_back(constraint); blocked[target].push_back(constraint);

View File

@ -10,7 +10,6 @@
#include <stdexcept> #include <stdexcept>
#include <type_traits> #include <type_traits>
LUAU_FASTFLAGVARIABLE(LuauTypeMismatchInvarianceInError, false)
static std::string wrongNumberOfArgsString( static std::string wrongNumberOfArgsString(
size_t expectedCount, std::optional<size_t> maximumCount, size_t actualCount, const char* argPrefix = nullptr, bool isVariadic = false) size_t expectedCount, std::optional<size_t> maximumCount, size_t actualCount, const char* argPrefix = nullptr, bool isVariadic = false)
@ -106,7 +105,7 @@ struct ErrorConverter
{ {
result += "; " + tm.reason; result += "; " + tm.reason;
} }
else if (FFlag::LuauTypeMismatchInvarianceInError && tm.context == TypeMismatch::InvariantContext) else if (tm.context == TypeMismatch::InvariantContext)
{ {
result += " in an invariant context"; result += " in an invariant context";
} }

View File

@ -950,6 +950,7 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
module->astExpectedTypes.clear(); module->astExpectedTypes.clear();
module->astOriginalCallTypes.clear(); module->astOriginalCallTypes.clear();
module->astOverloadResolvedTypes.clear(); module->astOverloadResolvedTypes.clear();
module->astForInNextTypes.clear();
module->astResolvedTypes.clear(); module->astResolvedTypes.clear();
module->astResolvedTypePacks.clear(); module->astResolvedTypePacks.clear();
module->astScopes.clear(); module->astScopes.clear();

View File

@ -4,8 +4,6 @@
#include "Luau/TxnLog.h" #include "Luau/TxnLog.h"
#include "Luau/TypeArena.h" #include "Luau/TypeArena.h"
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
namespace Luau namespace Luau
{ {
@ -33,7 +31,7 @@ bool Instantiation::ignoreChildren(TypeId ty)
{ {
if (log->getMutable<FunctionType>(ty)) if (log->getMutable<FunctionType>(ty))
return true; return true;
else if (FFlag::LuauClassTypeVarsInSubstitution && get<ClassType>(ty)) else if (get<ClassType>(ty))
return true; return true;
else else
return false; return false;
@ -84,7 +82,7 @@ bool ReplaceGenerics::ignoreChildren(TypeId ty)
// whenever we quantify, so the vectors overlap if and only if they are equal. // whenever we quantify, so the vectors overlap if and only if they are equal.
return (!generics.empty() || !genericPacks.empty()) && (ftv->generics == generics) && (ftv->genericPacks == genericPacks); return (!generics.empty() || !genericPacks.empty()) && (ftv->generics == generics) && (ftv->genericPacks == genericPacks);
} }
else if (FFlag::LuauClassTypeVarsInSubstitution && get<ClassType>(ty)) else if (get<ClassType>(ty))
return true; return true;
else else
{ {

View File

@ -16,9 +16,6 @@
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution); LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess2, false); LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess2, false);
LUAU_FASTFLAG(LuauSubstitutionReentrant);
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution);
LUAU_FASTFLAG(LuauSubstitutionFixMissingFields);
LUAU_FASTFLAGVARIABLE(LuauCloneSkipNonInternalVisit, false); LUAU_FASTFLAGVARIABLE(LuauCloneSkipNonInternalVisit, false);
namespace Luau namespace Luau
@ -134,8 +131,6 @@ struct ClonePublicInterface : Substitution
TypeId cloneType(TypeId ty) TypeId cloneType(TypeId ty)
{ {
LUAU_ASSERT(FFlag::LuauSubstitutionReentrant && FFlag::LuauSubstitutionFixMissingFields);
std::optional<TypeId> result = substitute(ty); std::optional<TypeId> result = substitute(ty);
if (result) if (result)
{ {
@ -150,8 +145,6 @@ struct ClonePublicInterface : Substitution
TypePackId cloneTypePack(TypePackId tp) TypePackId cloneTypePack(TypePackId tp)
{ {
LUAU_ASSERT(FFlag::LuauSubstitutionReentrant && FFlag::LuauSubstitutionFixMissingFields);
std::optional<TypePackId> result = substitute(tp); std::optional<TypePackId> result = substitute(tp);
if (result) if (result)
{ {
@ -166,8 +159,6 @@ struct ClonePublicInterface : Substitution
TypeFun cloneTypeFun(const TypeFun& tf) TypeFun cloneTypeFun(const TypeFun& tf)
{ {
LUAU_ASSERT(FFlag::LuauSubstitutionReentrant && FFlag::LuauSubstitutionFixMissingFields);
std::vector<GenericTypeDefinition> typeParams; std::vector<GenericTypeDefinition> typeParams;
std::vector<GenericTypePackDefinition> typePackParams; std::vector<GenericTypePackDefinition> typePackParams;

View File

@ -19,7 +19,6 @@ LUAU_FASTINTVARIABLE(LuauNormalizeIterationLimit, 1200);
LUAU_FASTINTVARIABLE(LuauNormalizeCacheLimit, 100000); LUAU_FASTINTVARIABLE(LuauNormalizeCacheLimit, 100000);
LUAU_FASTFLAGVARIABLE(LuauNormalizeBlockedTypes, false); LUAU_FASTFLAGVARIABLE(LuauNormalizeBlockedTypes, false);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution) LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauUninhabitedSubAnything2)
LUAU_FASTFLAG(LuauTransitiveSubtyping) LUAU_FASTFLAG(LuauTransitiveSubtyping)
LUAU_FASTFLAG(DebugLuauReadWriteProperties) LUAU_FASTFLAG(DebugLuauReadWriteProperties)
@ -312,12 +311,6 @@ static bool isShallowInhabited(const NormalizedType& norm)
!norm.functions.isNever() || !norm.tables.empty() || !norm.tyvars.empty(); !norm.functions.isNever() || !norm.tables.empty() || !norm.tyvars.empty();
} }
bool isInhabited_DEPRECATED(const NormalizedType& norm)
{
LUAU_ASSERT(!FFlag::LuauUninhabitedSubAnything2);
return isShallowInhabited(norm);
}
bool Normalizer::isInhabited(const NormalizedType* norm, std::unordered_set<TypeId> seen) bool Normalizer::isInhabited(const NormalizedType* norm, std::unordered_set<TypeId> seen)
{ {
// If normalization failed, the type is complex, and so is more likely than not to be inhabited. // If normalization failed, the type is complex, and so is more likely than not to be inhabited.

View File

@ -10,7 +10,6 @@
LUAU_FASTFLAG(DebugLuauSharedSelf) LUAU_FASTFLAG(DebugLuauSharedSelf)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution); LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
namespace Luau namespace Luau
{ {
@ -244,7 +243,7 @@ struct PureQuantifier : Substitution
bool ignoreChildren(TypeId ty) override bool ignoreChildren(TypeId ty) override
{ {
if (FFlag::LuauClassTypeVarsInSubstitution && get<ClassType>(ty)) if (get<ClassType>(ty))
return true; return true;
return ty->persistent; return ty->persistent;

View File

@ -8,13 +8,11 @@
#include <algorithm> #include <algorithm>
#include <stdexcept> #include <stdexcept>
LUAU_FASTFLAGVARIABLE(LuauSubstitutionFixMissingFields, false)
LUAU_FASTFLAG(LuauClonePublicInterfaceLess2) LUAU_FASTFLAG(LuauClonePublicInterfaceLess2)
LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 10000) LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 10000)
LUAU_FASTFLAGVARIABLE(LuauClassTypeVarsInSubstitution, false)
LUAU_FASTFLAGVARIABLE(LuauSubstitutionReentrant, false)
LUAU_FASTFLAG(DebugLuauReadWriteProperties) LUAU_FASTFLAG(DebugLuauReadWriteProperties)
LUAU_FASTFLAG(LuauCloneSkipNonInternalVisit) LUAU_FASTFLAG(LuauCloneSkipNonInternalVisit)
LUAU_FASTFLAGVARIABLE(LuauTarjanSingleArr, false)
namespace Luau namespace Luau
{ {
@ -113,20 +111,35 @@ static TypeId shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool a
else if constexpr (std::is_same_v<T, BlockedType>) else if constexpr (std::is_same_v<T, BlockedType>)
return dest.addType(a); return dest.addType(a);
else if constexpr (std::is_same_v<T, PrimitiveType>) else if constexpr (std::is_same_v<T, PrimitiveType>)
{
LUAU_ASSERT(ty->persistent);
return ty; return ty;
}
else if constexpr (std::is_same_v<T, PendingExpansionType>) else if constexpr (std::is_same_v<T, PendingExpansionType>)
{ {
PendingExpansionType clone = PendingExpansionType{a.prefix, a.name, a.typeArguments, a.packArguments}; PendingExpansionType clone = PendingExpansionType{a.prefix, a.name, a.typeArguments, a.packArguments};
return dest.addType(std::move(clone)); return dest.addType(std::move(clone));
} }
else if constexpr (std::is_same_v<T, AnyType>) else if constexpr (std::is_same_v<T, AnyType>)
{
LUAU_ASSERT(ty->persistent);
return ty; return ty;
}
else if constexpr (std::is_same_v<T, ErrorType>) else if constexpr (std::is_same_v<T, ErrorType>)
{
LUAU_ASSERT(ty->persistent);
return ty; return ty;
}
else if constexpr (std::is_same_v<T, UnknownType>) else if constexpr (std::is_same_v<T, UnknownType>)
{
LUAU_ASSERT(ty->persistent);
return ty; return ty;
}
else if constexpr (std::is_same_v<T, NeverType>) else if constexpr (std::is_same_v<T, NeverType>)
{
LUAU_ASSERT(ty->persistent);
return ty; return ty;
}
else if constexpr (std::is_same_v<T, LazyType>) else if constexpr (std::is_same_v<T, LazyType>)
return ty; return ty;
else if constexpr (std::is_same_v<T, SingletonType>) else if constexpr (std::is_same_v<T, SingletonType>)
@ -227,13 +240,10 @@ void Tarjan::visitChildren(TypeId ty, int index)
if (const FunctionType* ftv = get<FunctionType>(ty)) if (const FunctionType* ftv = get<FunctionType>(ty))
{ {
if (FFlag::LuauSubstitutionFixMissingFields) for (TypeId generic : ftv->generics)
{ visitChild(generic);
for (TypeId generic : ftv->generics) for (TypePackId genericPack : ftv->genericPacks)
visitChild(generic); visitChild(genericPack);
for (TypePackId genericPack : ftv->genericPacks)
visitChild(genericPack);
}
visitChild(ftv->argTypes); visitChild(ftv->argTypes);
visitChild(ftv->retTypes); visitChild(ftv->retTypes);
@ -295,7 +305,7 @@ void Tarjan::visitChildren(TypeId ty, int index)
for (TypePackId a : tfit->packArguments) for (TypePackId a : tfit->packArguments)
visitChild(a); visitChild(a);
} }
else if (const ClassType* ctv = get<ClassType>(ty); FFlag::LuauClassTypeVarsInSubstitution && ctv) else if (const ClassType* ctv = get<ClassType>(ty))
{ {
for (const auto& [name, prop] : ctv->props) for (const auto& [name, prop] : ctv->props)
visitChild(prop.type()); visitChild(prop.type());
@ -348,36 +358,67 @@ std::pair<int, bool> Tarjan::indexify(TypeId ty)
{ {
ty = log->follow(ty); ty = log->follow(ty);
bool fresh = !typeToIndex.contains(ty); if (FFlag::LuauTarjanSingleArr)
int& index = typeToIndex[ty];
if (fresh)
{ {
index = int(indexToType.size()); auto [index, fresh] = typeToIndex.try_insert(ty, false);
indexToType.push_back(ty);
indexToPack.push_back(nullptr); if (fresh)
onStack.push_back(false); {
lowlink.push_back(index); index = int(nodes.size());
nodes.push_back({ty, nullptr, false, false, index});
}
return {index, fresh};
}
else
{
bool fresh = !typeToIndex.contains(ty);
int& index = typeToIndex[ty];
if (fresh)
{
index = int(indexToType.size());
indexToType.push_back(ty);
indexToPack.push_back(nullptr);
onStack.push_back(false);
lowlink.push_back(index);
}
return {index, fresh};
} }
return {index, fresh};
} }
std::pair<int, bool> Tarjan::indexify(TypePackId tp) std::pair<int, bool> Tarjan::indexify(TypePackId tp)
{ {
tp = log->follow(tp); tp = log->follow(tp);
bool fresh = !packToIndex.contains(tp); if (FFlag::LuauTarjanSingleArr)
int& index = packToIndex[tp];
if (fresh)
{ {
index = int(indexToPack.size()); auto [index, fresh] = packToIndex.try_insert(tp, false);
indexToType.push_back(nullptr);
indexToPack.push_back(tp); if (fresh)
onStack.push_back(false); {
lowlink.push_back(index); index = int(nodes.size());
nodes.push_back({nullptr, tp, false, false, index});
}
return {index, fresh};
}
else
{
bool fresh = !packToIndex.contains(tp);
int& index = packToIndex[tp];
if (fresh)
{
index = int(indexToPack.size());
indexToType.push_back(nullptr);
indexToPack.push_back(tp);
onStack.push_back(false);
lowlink.push_back(index);
}
return {index, fresh};
} }
return {index, fresh};
} }
void Tarjan::visitChild(TypeId ty) void Tarjan::visitChild(TypeId ty)
@ -397,6 +438,246 @@ void Tarjan::visitChild(TypePackId tp)
} }
TarjanResult Tarjan::loop() TarjanResult Tarjan::loop()
{
if (!FFlag::LuauTarjanSingleArr)
return loop_DEPRECATED();
// Normally Tarjan is presented recursively, but this is a hot loop, so worth optimizing
while (!worklist.empty())
{
auto [index, currEdge, lastEdge] = worklist.back();
// First visit
if (currEdge == -1)
{
++childCount;
if (childLimit > 0 && childLimit <= childCount)
return TarjanResult::TooManyChildren;
stack.push_back(index);
nodes[index].onStack = true;
currEdge = int(edgesTy.size());
// Fill in edge list of this vertex
if (TypeId ty = nodes[index].ty)
visitChildren(ty, index);
else if (TypePackId tp = nodes[index].tp)
visitChildren(tp, index);
lastEdge = int(edgesTy.size());
}
// Visit children
bool foundFresh = false;
for (; currEdge < lastEdge; currEdge++)
{
int childIndex = -1;
bool fresh = false;
if (auto ty = edgesTy[currEdge])
std::tie(childIndex, fresh) = indexify(ty);
else if (auto tp = edgesTp[currEdge])
std::tie(childIndex, fresh) = indexify(tp);
else
LUAU_ASSERT(false);
if (fresh)
{
// Original recursion point, update the parent continuation point and start the new element
worklist.back() = {index, currEdge + 1, lastEdge};
worklist.push_back({childIndex, -1, -1});
// We need to continue the top-level loop from the start with the new worklist element
foundFresh = true;
break;
}
else if (nodes[childIndex].onStack)
{
nodes[index].lowlink = std::min(nodes[index].lowlink, childIndex);
}
visitEdge(childIndex, index);
}
if (foundFresh)
continue;
if (nodes[index].lowlink == index)
{
visitSCC(index);
while (!stack.empty())
{
int popped = stack.back();
stack.pop_back();
nodes[popped].onStack = false;
if (popped == index)
break;
}
}
worklist.pop_back();
// Original return from recursion into a child
if (!worklist.empty())
{
auto [parentIndex, _, parentEndEdge] = worklist.back();
// No need to keep child edges around
edgesTy.resize(parentEndEdge);
edgesTp.resize(parentEndEdge);
nodes[parentIndex].lowlink = std::min(nodes[parentIndex].lowlink, nodes[index].lowlink);
visitEdge(index, parentIndex);
}
}
return TarjanResult::Ok;
}
TarjanResult Tarjan::visitRoot(TypeId ty)
{
childCount = 0;
if (childLimit == 0)
childLimit = FInt::LuauTarjanChildLimit;
ty = log->follow(ty);
auto [index, fresh] = indexify(ty);
worklist.push_back({index, -1, -1});
return loop();
}
TarjanResult Tarjan::visitRoot(TypePackId tp)
{
childCount = 0;
if (childLimit == 0)
childLimit = FInt::LuauTarjanChildLimit;
tp = log->follow(tp);
auto [index, fresh] = indexify(tp);
worklist.push_back({index, -1, -1});
return loop();
}
void Tarjan::clearTarjan()
{
if (FFlag::LuauTarjanSingleArr)
{
typeToIndex.clear();
packToIndex.clear();
nodes.clear();
stack.clear();
}
else
{
dirty.clear();
typeToIndex.clear();
packToIndex.clear();
indexToType.clear();
indexToPack.clear();
stack.clear();
onStack.clear();
lowlink.clear();
}
edgesTy.clear();
edgesTp.clear();
worklist.clear();
}
bool Tarjan::getDirty(int index)
{
if (FFlag::LuauTarjanSingleArr)
{
LUAU_ASSERT(size_t(index) < nodes.size());
return nodes[index].dirty;
}
else
{
if (dirty.size() <= size_t(index))
dirty.resize(index + 1, false);
return dirty[index];
}
}
void Tarjan::setDirty(int index, bool d)
{
if (FFlag::LuauTarjanSingleArr)
{
LUAU_ASSERT(size_t(index) < nodes.size());
nodes[index].dirty = d;
}
else
{
if (dirty.size() <= size_t(index))
dirty.resize(index + 1, false);
dirty[index] = d;
}
}
void Tarjan::visitEdge(int index, int parentIndex)
{
if (getDirty(index))
setDirty(parentIndex, true);
}
void Tarjan::visitSCC(int index)
{
if (!FFlag::LuauTarjanSingleArr)
return visitSCC_DEPRECATED(index);
bool d = getDirty(index);
for (auto it = stack.rbegin(); !d && it != stack.rend(); it++)
{
TarjanNode& node = nodes[*it];
if (TypeId ty = node.ty)
d = isDirty(ty);
else if (TypePackId tp = node.tp)
d = isDirty(tp);
if (*it == index)
break;
}
if (!d)
return;
for (auto it = stack.rbegin(); it != stack.rend(); it++)
{
setDirty(*it, true);
TarjanNode& node = nodes[*it];
if (TypeId ty = node.ty)
foundDirty(ty);
else if (TypePackId tp = node.tp)
foundDirty(tp);
if (*it == index)
return;
}
}
TarjanResult Tarjan::findDirty(TypeId ty)
{
return visitRoot(ty);
}
TarjanResult Tarjan::findDirty(TypePackId tp)
{
return visitRoot(tp);
}
TarjanResult Tarjan::loop_DEPRECATED()
{ {
// Normally Tarjan is presented recursively, but this is a hot loop, so worth optimizing // Normally Tarjan is presented recursively, but this is a hot loop, so worth optimizing
while (!worklist.empty()) while (!worklist.empty())
@ -492,71 +773,8 @@ TarjanResult Tarjan::loop()
return TarjanResult::Ok; return TarjanResult::Ok;
} }
TarjanResult Tarjan::visitRoot(TypeId ty)
{
childCount = 0;
if (childLimit == 0)
childLimit = FInt::LuauTarjanChildLimit;
ty = log->follow(ty); void Tarjan::visitSCC_DEPRECATED(int index)
auto [index, fresh] = indexify(ty);
worklist.push_back({index, -1, -1});
return loop();
}
TarjanResult Tarjan::visitRoot(TypePackId tp)
{
childCount = 0;
if (childLimit == 0)
childLimit = FInt::LuauTarjanChildLimit;
tp = log->follow(tp);
auto [index, fresh] = indexify(tp);
worklist.push_back({index, -1, -1});
return loop();
}
void FindDirty::clearTarjan()
{
dirty.clear();
typeToIndex.clear();
packToIndex.clear();
indexToType.clear();
indexToPack.clear();
stack.clear();
onStack.clear();
lowlink.clear();
edgesTy.clear();
edgesTp.clear();
worklist.clear();
}
bool FindDirty::getDirty(int index)
{
if (dirty.size() <= size_t(index))
dirty.resize(index + 1, false);
return dirty[index];
}
void FindDirty::setDirty(int index, bool d)
{
if (dirty.size() <= size_t(index))
dirty.resize(index + 1, false);
dirty[index] = d;
}
void FindDirty::visitEdge(int index, int parentIndex)
{
if (getDirty(index))
setDirty(parentIndex, true);
}
void FindDirty::visitSCC(int index)
{ {
bool d = getDirty(index); bool d = getDirty(index);
@ -585,23 +803,12 @@ void FindDirty::visitSCC(int index)
} }
} }
TarjanResult FindDirty::findDirty(TypeId ty)
{
return visitRoot(ty);
}
TarjanResult FindDirty::findDirty(TypePackId tp)
{
return visitRoot(tp);
}
std::optional<TypeId> Substitution::substitute(TypeId ty) std::optional<TypeId> Substitution::substitute(TypeId ty)
{ {
ty = log->follow(ty); ty = log->follow(ty);
// clear algorithm state for reentrancy // clear algorithm state for reentrancy
if (FFlag::LuauSubstitutionReentrant) clearTarjan();
clearTarjan();
auto result = findDirty(ty); auto result = findDirty(ty);
if (result != TarjanResult::Ok) if (result != TarjanResult::Ok)
@ -609,34 +816,18 @@ std::optional<TypeId> Substitution::substitute(TypeId ty)
for (auto [oldTy, newTy] : newTypes) for (auto [oldTy, newTy] : newTypes)
{ {
if (FFlag::LuauSubstitutionReentrant) if (!ignoreChildren(oldTy) && !replacedTypes.contains(newTy))
{ {
if (!ignoreChildren(oldTy) && !replacedTypes.contains(newTy)) replaceChildren(newTy);
{ replacedTypes.insert(newTy);
replaceChildren(newTy);
replacedTypes.insert(newTy);
}
}
else
{
if (!ignoreChildren(oldTy))
replaceChildren(newTy);
} }
} }
for (auto [oldTp, newTp] : newPacks) for (auto [oldTp, newTp] : newPacks)
{ {
if (FFlag::LuauSubstitutionReentrant) if (!ignoreChildren(oldTp) && !replacedTypePacks.contains(newTp))
{ {
if (!ignoreChildren(oldTp) && !replacedTypePacks.contains(newTp)) replaceChildren(newTp);
{ replacedTypePacks.insert(newTp);
replaceChildren(newTp);
replacedTypePacks.insert(newTp);
}
}
else
{
if (!ignoreChildren(oldTp))
replaceChildren(newTp);
} }
} }
TypeId newTy = replace(ty); TypeId newTy = replace(ty);
@ -648,8 +839,7 @@ std::optional<TypePackId> Substitution::substitute(TypePackId tp)
tp = log->follow(tp); tp = log->follow(tp);
// clear algorithm state for reentrancy // clear algorithm state for reentrancy
if (FFlag::LuauSubstitutionReentrant) clearTarjan();
clearTarjan();
auto result = findDirty(tp); auto result = findDirty(tp);
if (result != TarjanResult::Ok) if (result != TarjanResult::Ok)
@ -657,34 +847,18 @@ std::optional<TypePackId> Substitution::substitute(TypePackId tp)
for (auto [oldTy, newTy] : newTypes) for (auto [oldTy, newTy] : newTypes)
{ {
if (FFlag::LuauSubstitutionReentrant) if (!ignoreChildren(oldTy) && !replacedTypes.contains(newTy))
{ {
if (!ignoreChildren(oldTy) && !replacedTypes.contains(newTy)) replaceChildren(newTy);
{ replacedTypes.insert(newTy);
replaceChildren(newTy);
replacedTypes.insert(newTy);
}
}
else
{
if (!ignoreChildren(oldTy))
replaceChildren(newTy);
} }
} }
for (auto [oldTp, newTp] : newPacks) for (auto [oldTp, newTp] : newPacks)
{ {
if (FFlag::LuauSubstitutionReentrant) if (!ignoreChildren(oldTp) && !replacedTypePacks.contains(newTp))
{ {
if (!ignoreChildren(oldTp) && !replacedTypePacks.contains(newTp)) replaceChildren(newTp);
{ replacedTypePacks.insert(newTp);
replaceChildren(newTp);
replacedTypePacks.insert(newTp);
}
}
else
{
if (!ignoreChildren(oldTp))
replaceChildren(newTp);
} }
} }
TypePackId newTp = replace(tp); TypePackId newTp = replace(tp);
@ -714,8 +888,7 @@ TypePackId Substitution::clone(TypePackId tp)
{ {
VariadicTypePack clone; VariadicTypePack clone;
clone.ty = vtp->ty; clone.ty = vtp->ty;
if (FFlag::LuauSubstitutionFixMissingFields) clone.hidden = vtp->hidden;
clone.hidden = vtp->hidden;
return addTypePack(std::move(clone)); return addTypePack(std::move(clone));
} }
else if (const TypeFamilyInstanceTypePack* tfitp = get<TypeFamilyInstanceTypePack>(tp)) else if (const TypeFamilyInstanceTypePack* tfitp = get<TypeFamilyInstanceTypePack>(tp))
@ -738,7 +911,7 @@ void Substitution::foundDirty(TypeId ty)
{ {
ty = log->follow(ty); ty = log->follow(ty);
if (FFlag::LuauSubstitutionReentrant && newTypes.contains(ty)) if (newTypes.contains(ty))
return; return;
if (isDirty(ty)) if (isDirty(ty))
@ -751,7 +924,7 @@ void Substitution::foundDirty(TypePackId tp)
{ {
tp = log->follow(tp); tp = log->follow(tp);
if (FFlag::LuauSubstitutionReentrant && newPacks.contains(tp)) if (newPacks.contains(tp))
return; return;
if (isDirty(tp)) if (isDirty(tp))
@ -792,13 +965,10 @@ void Substitution::replaceChildren(TypeId ty)
if (FunctionType* ftv = getMutable<FunctionType>(ty)) if (FunctionType* ftv = getMutable<FunctionType>(ty))
{ {
if (FFlag::LuauSubstitutionFixMissingFields) for (TypeId& generic : ftv->generics)
{ generic = replace(generic);
for (TypeId& generic : ftv->generics) for (TypePackId& genericPack : ftv->genericPacks)
generic = replace(generic); genericPack = replace(genericPack);
for (TypePackId& genericPack : ftv->genericPacks)
genericPack = replace(genericPack);
}
ftv->argTypes = replace(ftv->argTypes); ftv->argTypes = replace(ftv->argTypes);
ftv->retTypes = replace(ftv->retTypes); ftv->retTypes = replace(ftv->retTypes);
@ -857,7 +1027,7 @@ void Substitution::replaceChildren(TypeId ty)
for (TypePackId& a : tfit->packArguments) for (TypePackId& a : tfit->packArguments)
a = replace(a); a = replace(a);
} }
else if (ClassType* ctv = getMutable<ClassType>(ty); FFlag::LuauClassTypeVarsInSubstitution && ctv) else if (ClassType* ctv = getMutable<ClassType>(ty))
{ {
for (auto& [name, prop] : ctv->props) for (auto& [name, prop] : ctv->props)
prop.setType(replace(prop.type())); prop.setType(replace(prop.type()));

View File

@ -7,6 +7,7 @@
#include "Luau/Common.h" #include "Luau/Common.h"
#include "Luau/DcrLogger.h" #include "Luau/DcrLogger.h"
#include "Luau/Error.h" #include "Luau/Error.h"
#include "Luau/InsertionOrderedMap.h"
#include "Luau/Instantiation.h" #include "Luau/Instantiation.h"
#include "Luau/Metamethods.h" #include "Luau/Metamethods.h"
#include "Luau/Normalize.h" #include "Luau/Normalize.h"
@ -656,7 +657,7 @@ struct TypeChecker2
// if the initial and expected types from the iterator unified during constraint solving, // if the initial and expected types from the iterator unified during constraint solving,
// we'll have a resolved type to use here, but we'll only use it if either the iterator is // we'll have a resolved type to use here, but we'll only use it if either the iterator is
// directly present in the for-in statement or if we have an iterator state constraining us // directly present in the for-in statement or if we have an iterator state constraining us
TypeId* resolvedTy = module->astOverloadResolvedTypes.find(firstValue); TypeId* resolvedTy = module->astForInNextTypes.find(firstValue);
if (resolvedTy && (!retPack || valueTypes.size() > 1)) if (resolvedTy && (!retPack || valueTypes.size() > 1))
valueTypes[0] = *resolvedTy; valueTypes[0] = *resolvedTy;
@ -1062,83 +1063,21 @@ struct TypeChecker2
// Note: this is intentionally separated from `visit(AstExprCall*)` for stack allocation purposes. // Note: this is intentionally separated from `visit(AstExprCall*)` for stack allocation purposes.
void visitCall(AstExprCall* call) void visitCall(AstExprCall* call)
{ {
TypePackId expectedRetType = lookupExpectedPack(call, testArena);
TypePack args; TypePack args;
std::vector<Location> argLocs; std::vector<Location> argLocs;
argLocs.reserve(call->args.size + 1); argLocs.reserve(call->args.size + 1);
auto maybeOriginalCallTy = module->astOriginalCallTypes.find(call); TypeId* originalCallTy = module->astOriginalCallTypes.find(call);
if (!maybeOriginalCallTy) TypeId* selectedOverloadTy = module->astOverloadResolvedTypes.find(call);
if (!originalCallTy && !selectedOverloadTy)
return; return;
TypeId originalCallTy = follow(*maybeOriginalCallTy); TypeId fnTy = follow(selectedOverloadTy ? *selectedOverloadTy : *originalCallTy);
std::vector<TypeId> overloads = flattenIntersection(originalCallTy); if (get<AnyType>(fnTy) || get<ErrorType>(fnTy) || get<NeverType>(fnTy))
if (get<AnyType>(originalCallTy) || get<ErrorType>(originalCallTy) || get<NeverType>(originalCallTy))
return; return;
else if (std::optional<TypeId> callMm = findMetatableEntry(builtinTypes, module->errors, originalCallTy, "__call", call->func->location)) else if (isOptional(fnTy))
{ {
if (get<FunctionType>(follow(*callMm))) reportError(OptionalValueAccess{fnTy}, call->func->location);
{
args.head.push_back(originalCallTy);
argLocs.push_back(call->func->location);
}
else
{
// TODO: This doesn't flag the __call metamethod as the problem
// very clearly.
reportError(CannotCallNonFunction{*callMm}, call->func->location);
return;
}
}
else if (get<FunctionType>(originalCallTy))
{
// ok.
}
else if (get<IntersectionType>(originalCallTy))
{
auto norm = normalizer.normalize(originalCallTy);
if (!norm)
return reportError(CodeTooComplex{}, call->location);
// NormalizedType::hasFunction returns true if its' tops component is `unknown`, but for soundness we want the reverse.
if (get<UnknownType>(norm->tops) || !norm->hasFunctions())
return reportError(CannotCallNonFunction{originalCallTy}, call->func->location);
}
else if (auto utv = get<UnionType>(originalCallTy))
{
// Sometimes it's okay to call a union of functions, but only if all of the functions are the same.
// Another scenario we might run into it is if the union has a nil member. In this case, we want to throw an error
if (isOptional(originalCallTy))
{
reportError(OptionalValueAccess{originalCallTy}, call->location);
return;
}
std::optional<TypeId> fst;
for (TypeId ty : utv)
{
if (!fst)
fst = follow(ty);
else if (fst != follow(ty))
{
reportError(CannotCallNonFunction{originalCallTy}, call->func->location);
return;
}
}
if (!fst)
ice->ice("UnionType had no elements, so fst is nullopt?");
originalCallTy = follow(*fst);
if (!get<FunctionType>(originalCallTy))
{
reportError(CannotCallNonFunction{originalCallTy}, call->func->location);
return;
}
}
else
{
reportError(CannotCallNonFunction{originalCallTy}, call->func->location);
return; return;
} }
@ -1161,9 +1100,12 @@ struct TypeChecker2
args.head.push_back(*argTy); args.head.push_back(*argTy);
else if (i == call->args.size - 1) else if (i == call->args.size - 1)
{ {
TypePackId* argTail = module->astTypePacks.find(arg); if (auto argTail = module->astTypePacks.find(arg))
if (argTail) {
args.tail = *argTail; auto [head, tail] = flatten(*argTail);
args.head.insert(args.head.end(), head.begin(), head.end());
args.tail = tail;
}
else else
args.tail = builtinTypes->anyTypePack; args.tail = builtinTypes->anyTypePack;
} }
@ -1171,142 +1113,318 @@ struct TypeChecker2
args.head.push_back(builtinTypes->anyType); args.head.push_back(builtinTypes->anyType);
} }
TypePackId expectedArgTypes = testArena.addTypePack(args); FunctionCallResolver resolver{
builtinTypes,
NotNull{&testArena},
NotNull{&normalizer},
NotNull{stack.back()},
ice,
call->location,
};
if (auto maybeSelectedOverload = module->astOverloadResolvedTypes.find(call)) resolver.resolve(fnTy, &args, call->func->location, &argLocs);
if (!resolver.ok.empty())
return; // We found a call that works, so this is ok.
else if (auto norm = normalizer.normalize(fnTy); !norm || !normalizer.isInhabited(norm))
{ {
// This overload might not work still: the constraint solver will if (!norm)
// pass the type checker an instantiated function type that matches reportError(NormalizationTooComplex{}, call->func->location);
// in arity, but not in subtyping, in order to allow the type else
// checker to report better error messages. return; // Ok. Calling an uninhabited type is no-op.
}
TypeId selectedOverload = follow(*maybeSelectedOverload); else if (!resolver.nonviableOverloads.empty())
const FunctionType* ftv; {
if (resolver.nonviableOverloads.size() == 1)
if (get<AnyType>(selectedOverload) || get<ErrorType>(selectedOverload) || get<NeverType>(selectedOverload)) reportErrors(resolver.nonviableOverloads.front().second);
{
return;
}
else if (const FunctionType* overloadFtv = get<FunctionType>(selectedOverload))
{
ftv = overloadFtv;
}
else else
{ {
reportError(CannotCallNonFunction{selectedOverload}, call->func->location); std::string s = "None of the overloads for function that accept ";
return; s += std::to_string(args.head.size());
} s += " arguments are compatible.";
reportError(GenericError{std::move(s)}, call->location);
TxnLog fake{};
LUAU_ASSERT(ftv);
reportErrors(tryUnify(stack.back(), call->location, ftv->retTypes, expectedRetType, CountMismatch::Context::Return, /* genericsOkay */ true));
reportErrors(
reduceFamilies(ftv->retTypes, call->location, NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true)
.errors);
auto it = begin(expectedArgTypes);
size_t i = 0;
std::vector<TypeId> slice;
for (TypeId arg : ftv->argTypes)
{
if (it == end(expectedArgTypes))
{
slice.push_back(arg);
continue;
}
TypeId expectedArg = *it;
Location argLoc = argLocs.at(i >= argLocs.size() ? argLocs.size() - 1 : i);
reportErrors(tryUnify(stack.back(), argLoc, expectedArg, arg, CountMismatch::Context::Arg, /* genericsOkay */ true));
reportErrors(reduceFamilies(arg, argLoc, NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true).errors);
++it;
++i;
}
if (slice.size() > 0 && it == end(expectedArgTypes))
{
if (auto tail = it.tail())
{
TypePackId remainingArgs = testArena.addTypePack(TypePack{std::move(slice), std::nullopt});
reportErrors(tryUnify(stack.back(), argLocs.back(), *tail, remainingArgs, CountMismatch::Context::Arg, /* genericsOkay */ true));
reportErrors(reduceFamilies(
remainingArgs, argLocs.back(), NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true)
.errors);
}
} }
} }
else else if (!resolver.arityMismatches.empty())
{ {
// No overload worked, even when instantiated. We need to filter the if (resolver.arityMismatches.size() == 1)
// set of overloads to those that match the arity of the incoming reportErrors(resolver.arityMismatches.front().second);
// argument set, and then report only those as not matching. else
std::vector<TypeId> arityMatchingOverloads;
ErrorVec empty;
for (TypeId overload : overloads)
{ {
overload = follow(overload); std::string s = "No overload for function accepts ";
if (const FunctionType* ftv = get<FunctionType>(overload)) s += std::to_string(args.head.size());
{ s += " arguments.";
if (size(ftv->argTypes) == size(expectedArgTypes)) reportError(GenericError{std::move(s)}, call->location);
{
arityMatchingOverloads.push_back(overload);
}
}
else if (const std::optional<TypeId> callMm = findMetatableEntry(builtinTypes, empty, overload, "__call", call->location))
{
if (const FunctionType* ftv = get<FunctionType>(follow(*callMm)))
{
if (size(ftv->argTypes) == size(expectedArgTypes))
{
arityMatchingOverloads.push_back(overload);
}
}
else
{
reportError(CannotCallNonFunction{}, call->location);
}
}
} }
}
else if (!resolver.nonFunctions.empty())
reportError(CannotCallNonFunction{fnTy}, call->func->location);
else
LUAU_ASSERT(!"Generating the best possible error from this function call resolution was inexhaustive?");
if (arityMatchingOverloads.size() == 0) if (resolver.arityMismatches.size() > 1 || resolver.nonviableOverloads.size() > 1)
{
std::string s = "Available overloads: ";
std::vector<TypeId> overloads;
if (resolver.nonviableOverloads.empty())
{ {
reportError( for (const auto& [ty, p] : resolver.resolution)
GenericError{"No overload for function accepts " + std::to_string(size(expectedArgTypes)) + " arguments."}, call->location); {
if (p.first == FunctionCallResolver::TypeIsNotAFunction)
continue;
overloads.push_back(ty);
}
} }
else else
{ {
// We have handled the case of a singular arity-matching for (const auto& [ty, _] : resolver.nonviableOverloads)
// overload above, in the case where an overload was selected. overloads.push_back(ty);
// LUAU_ASSERT(arityMatchingOverloads.size() > 1);
reportError(GenericError{"None of the overloads for function that accept " + std::to_string(size(expectedArgTypes)) +
" arguments are compatible."},
call->location);
} }
std::string s; for (size_t i = 0; i < overloads.size(); ++i)
std::vector<TypeId>& stringifyOverloads = arityMatchingOverloads.size() == 0 ? overloads : arityMatchingOverloads;
for (size_t i = 0; i < stringifyOverloads.size(); ++i)
{ {
TypeId overload = follow(stringifyOverloads[i]);
if (i > 0) if (i > 0)
s += "; "; s += (i == overloads.size() - 1) ? "; and " : "; ";
if (i > 0 && i == stringifyOverloads.size() - 1) s += toString(overloads[i]);
s += "and ";
s += toString(overload);
} }
reportError(ExtraInformation{"Available overloads: " + s}, call->func->location); reportError(ExtraInformation{std::move(s)}, call->func->location);
} }
} }
// Resolves a function call against a callee type (possibly an intersection of
// overloads). Each overload is classified into one of the Analysis buckets;
// the caller inspects the bucket vectors to decide which error (if any) to
// report. Results are recorded in insertion order so error messages can list
// overloads deterministically.
struct FunctionCallResolver
{
    enum Analysis
    {
        Ok,
        TypeIsNotAFunction,
        ArityMismatch,
        OverloadIsNonviable, // Arguments were incompatible with the overload's parameters, but were otherwise compatible by arity.
    };

    NotNull<BuiltinTypes> builtinTypes;
    NotNull<TypeArena> arena;
    NotNull<Normalizer> normalizer;
    NotNull<Scope> scope;
    NotNull<InternalErrorReporter> ice;
    Location callLoc;

    // One bucket per Analysis outcome; an overload lands in exactly one.
    std::vector<TypeId> ok;
    std::vector<TypeId> nonFunctions;
    std::vector<std::pair<TypeId, ErrorVec>> arityMismatches;
    std::vector<std::pair<TypeId, ErrorVec>> nonviableOverloads;
    // Maps each examined overload to its outcome and its index within the
    // corresponding bucket above; also serves as a dedupe set in resolve().
    InsertionOrderedMap<TypeId, std::pair<Analysis, size_t>> resolution;

private:
    // Unify argument against parameter (covariantly), returning the errors if
    // any were produced, or nullopt on success.
    template<typename Ty>
    std::optional<ErrorVec> tryUnify(const Location& location, Ty subTy, Ty superTy)
    {
        Unifier u{normalizer, scope, location, Covariant};
        u.ctx = CountMismatch::Arg;
        u.hideousFixMeGenericsAreActuallyFree = true;
        u.enableScopeTests();
        u.tryUnify(subTy, superTy);
        if (u.errors.empty())
            return std::nullopt;

        return std::move(u.errors);
    }

    // Classify a single overload. Handles top/error/never types, plain
    // function types, and (once, non-recursively) a `__call` metamethod.
    std::pair<Analysis, ErrorVec> checkOverload(TypeId fnTy, const TypePack* args, Location fnLoc, const std::vector<Location>* argLocs, bool callMetamethodOk = true)
    {
        fnTy = follow(fnTy);

        ErrorVec discard;
        if (get<AnyType>(fnTy) || get<ErrorType>(fnTy) || get<NeverType>(fnTy))
            return {Ok, {}};
        else if (auto fn = get<FunctionType>(fnTy))
            return checkOverload_(fnTy, fn, args, fnLoc, argLocs); // Intentionally split to reduce the stack pressure of this function.
        else if (auto callMm = findMetatableEntry(builtinTypes, discard, fnTy, "__call", callLoc); callMm && callMetamethodOk)
        {
            // Calling a metamethod forwards the `fnTy` as self.
            TypePack withSelf = *args;
            withSelf.head.insert(withSelf.head.begin(), fnTy);

            std::vector<Location> withSelfLocs = *argLocs;
            withSelfLocs.insert(withSelfLocs.begin(), fnLoc);

            // callMetamethodOk=false prevents chasing a second level of __call.
            return checkOverload(*callMm, &withSelf, fnLoc, &withSelfLocs, /*callMetamethodOk=*/ false);
        }
        else
            return {TypeIsNotAFunction, {}}; // Intentionally empty. We can just fabricate the type error later on.
    }

    // Check a concrete FunctionType overload against the call's arguments,
    // walking parameter and argument heads in lockstep, then their tails.
    // Returns ArityMismatch as soon as a count problem is proven; otherwise
    // accumulates all unification errors and returns OverloadIsNonviable (or
    // Ok when there are none).
    LUAU_NOINLINE
    std::pair<Analysis, ErrorVec> checkOverload_(TypeId fnTy, const FunctionType* fn, const TypePack* args, Location fnLoc, const std::vector<Location>* argLocs)
    {
        // Force-reduce type families first; reduction errors make the
        // overload nonviable outright.
        TxnLog fake;
        FamilyGraphReductionResult result = reduceFamilies(fnTy, callLoc, arena, builtinTypes, scope, normalizer, &fake, /*force=*/ true);
        if (!result.errors.empty())
            return {OverloadIsNonviable, result.errors};

        ErrorVec argumentErrors;

        // Reminder: Functions have parameters. You provide arguments.
        auto paramIter = begin(fn->argTypes);
        size_t argOffset = 0;

        // Phase 1: unify each provided argument with its parameter.
        while (paramIter != end(fn->argTypes))
        {
            if (argOffset >= args->head.size())
                break;

            TypeId paramTy = *paramIter;
            TypeId argTy = args->head[argOffset];
            // Clamp to the last known location when arguments outnumber locations.
            Location argLoc = argLocs->at(argOffset >= argLocs->size() ? argLocs->size() - 1 : argOffset);

            if (auto errors = tryUnify(argLoc, argTy, paramTy))
            {
                // Since we're stopping right here, we need to decide if this is a nonviable overload or if there is an arity mismatch.
                // If it's a nonviable overload, then we need to keep going to get all type errors.
                auto [minParams, optMaxParams] = getParameterExtents(TxnLog::empty(), fn->argTypes);
                if (args->head.size() < minParams)
                    return {ArityMismatch, *errors};
                else
                    argumentErrors.insert(argumentErrors.end(), errors->begin(), errors->end());
            }

            ++paramIter;
            ++argOffset;
        }

        // Phase 2: excess arguments must be absorbed by a variadic parameter tail.
        while (argOffset < args->head.size())
        {
            // If we can iterate over the head of arguments, then we have exhausted the head of the parameters.
            LUAU_ASSERT(paramIter == end(fn->argTypes));

            Location argLoc = argLocs->at(argOffset >= argLocs->size() ? argLocs->size() - 1 : argOffset);

            if (!paramIter.tail())
            {
                // No tail at all: too many arguments were passed.
                auto [minParams, optMaxParams] = getParameterExtents(TxnLog::empty(), fn->argTypes);
                TypeError error{argLoc, CountMismatch{minParams, optMaxParams, args->head.size(), CountMismatch::Arg, false}};
                return {ArityMismatch, {error}};
            }
            else if (auto vtp = get<VariadicTypePack>(follow(paramIter.tail())))
            {
                if (auto errors = tryUnify(argLoc, args->head[argOffset], vtp->ty))
                    argumentErrors.insert(argumentErrors.end(), errors->begin(), errors->end());
            }

            ++argOffset;
        }

        // Phase 3: leftover parameters must be satisfied by a variadic
        // argument tail, or be optional.
        while (paramIter != end(fn->argTypes))
        {
            // If we can iterate over parameters, then we have exhausted the head of the arguments.
            LUAU_ASSERT(argOffset == args->head.size());

            // It may have a tail, however, so check that.
            if (auto vtp = get<VariadicTypePack>(follow(args->tail)))
            {
                Location argLoc = argLocs->at(argLocs->size() - 1);

                if (auto errors = tryUnify(argLoc, vtp->ty, *paramIter))
                    argumentErrors.insert(argumentErrors.end(), errors->begin(), errors->end());
            }
            else if (!isOptional(*paramIter))
            {
                Location argLoc = argLocs->empty() ? fnLoc : argLocs->at(argLocs->size() - 1);

                // It is ok to have excess parameters as long as they are all optional.
                auto [minParams, optMaxParams] = getParameterExtents(TxnLog::empty(), fn->argTypes);
                TypeError error{argLoc, CountMismatch{minParams, optMaxParams, args->head.size(), CountMismatch::Arg, false}};
                return {ArityMismatch, {error}};
            }

            ++paramIter;
        }

        // We hit the end of the heads for both parameters and arguments, so check their tails.
        LUAU_ASSERT(paramIter == end(fn->argTypes));
        LUAU_ASSERT(argOffset == args->head.size());

        if (paramIter.tail() && args->tail)
        {
            Location argLoc = argLocs->at(argLocs->size() - 1);

            if (auto errors = tryUnify(argLoc, *args->tail, *paramIter.tail()))
                argumentErrors.insert(argumentErrors.end(), errors->begin(), errors->end());
        }

        return {argumentErrors.empty() ? Ok : OverloadIsNonviable, argumentErrors};
    }

    // Index the next entry would occupy within the bucket for `analysis`.
    size_t indexof(Analysis analysis)
    {
        switch (analysis)
        {
        case Ok:
            return ok.size();
        case TypeIsNotAFunction:
            return nonFunctions.size();
        case ArityMismatch:
            return arityMismatches.size();
        case OverloadIsNonviable:
            return nonviableOverloads.size();
        }

        ice->ice("Inexhaustive switch in FunctionCallResolver::indexof");
    }

    // Record an overload's classification in `resolution` and its bucket.
    void add(Analysis analysis, TypeId ty, ErrorVec&& errors)
    {
        resolution.insert(ty, {analysis, indexof(analysis)});

        switch (analysis)
        {
        case Ok:
            LUAU_ASSERT(errors.empty());
            ok.push_back(ty);
            break;
        case TypeIsNotAFunction:
            LUAU_ASSERT(errors.empty());
            nonFunctions.push_back(ty);
            break;
        case ArityMismatch:
            LUAU_ASSERT(!errors.empty());
            arityMismatches.emplace_back(ty, std::move(errors));
            break;
        case OverloadIsNonviable:
            LUAU_ASSERT(!errors.empty());
            nonviableOverloads.emplace_back(ty, std::move(errors));
            break;
        }
    }

public:
    // Classify `fnTy` (each member individually, if it is an intersection)
    // against the given arguments. Results are accumulated in the public
    // bucket vectors; duplicate overloads are examined only once.
    void resolve(TypeId fnTy, const TypePack* args, Location selfLoc, const std::vector<Location>* argLocs)
    {
        fnTy = follow(fnTy);

        auto it = get<IntersectionType>(fnTy);
        if (!it)
        {
            auto [analysis, errors] = checkOverload(fnTy, args, selfLoc, argLocs);
            add(analysis, fnTy, std::move(errors));
            return;
        }

        for (TypeId ty : it)
        {
            // Skip overloads we've already classified.
            if (resolution.find(ty) != resolution.end())
                continue;

            auto [analysis, errors] = checkOverload(ty, args, selfLoc, argLocs);
            add(analysis, ty, std::move(errors));
        }
    }
};
void visit(AstExprCall* call) void visit(AstExprCall* call)
{ {
visit(call->func, ValueContext::RValue); visit(call->func, ValueContext::RValue);
@ -1584,7 +1702,11 @@ struct TypeChecker2
leftType = stripNil(builtinTypes, testArena, leftType); leftType = stripNil(builtinTypes, testArena, leftType);
} }
bool isStringOperation = isString(leftType) && isString(rightType); const NormalizedType* normLeft = normalizer.normalize(leftType);
const NormalizedType* normRight = normalizer.normalize(rightType);
bool isStringOperation =
(normLeft ? normLeft->isSubtypeOfString() : isString(leftType)) && (normRight ? normRight->isSubtypeOfString() : isString(rightType));
if (get<AnyType>(leftType) || get<ErrorType>(leftType) || get<NeverType>(leftType)) if (get<AnyType>(leftType) || get<ErrorType>(leftType) || get<NeverType>(leftType))
return leftType; return leftType;
@ -1630,15 +1752,16 @@ struct TypeChecker2
{ {
testUnion(utv, leftMt); testUnion(utv, leftMt);
} }
// If either left or right has no metatable (or both), we need to consider if
// there are values in common that could possibly inhabit the type (and thus equality could be considered)
if (!leftMt.has_value() || !rightMt.has_value())
{
matches = matches || typesHaveIntersection;
}
} }
// If we're working with things that are not tables, the metatable comparisons above are a little excessive
// It's ok for one type to have a meta table and the other to not. In that case, we should fall back on
// checking if the intersection of the types is inhabited.
// TODO: Maybe add more checks here (e.g. for functions, classes, etc)
if (!(get<TableType>(leftType) || get<TableType>(rightType)))
if (!leftMt.has_value() || !rightMt.has_value())
matches = matches || typesHaveIntersection;
if (!matches && isComparison) if (!matches && isComparison)
{ {
reportError(GenericError{format("Types %s and %s cannot be compared with %s because they do not have the same metatable", reportError(GenericError{format("Types %s and %s cannot be compared with %s because they do not have the same metatable",
@ -1663,15 +1786,15 @@ struct TypeChecker2
if (overrideKey != nullptr) if (overrideKey != nullptr)
key = overrideKey; key = overrideKey;
TypeId instantiatedMm = module->astOverloadResolvedTypes[key]; TypeId* selectedOverloadTy = module->astOverloadResolvedTypes.find(key);
if (!instantiatedMm) if (!selectedOverloadTy)
{ {
// reportError(CodeTooComplex{}, expr->location); // reportError(CodeTooComplex{}, expr->location);
// was handled by a type family // was handled by a type family
return expectedResult; return expectedResult;
} }
else if (const FunctionType* ftv = get<FunctionType>(follow(instantiatedMm))) else if (const FunctionType* ftv = get<FunctionType>(follow(*selectedOverloadTy)))
{ {
TypePackId expectedArgs; TypePackId expectedArgs;
// For >= and > we invoke __lt and __le respectively with // For >= and > we invoke __lt and __le respectively with
@ -1803,13 +1926,12 @@ struct TypeChecker2
case AstExprBinary::Op::CompareLe: case AstExprBinary::Op::CompareLe:
case AstExprBinary::Op::CompareLt: case AstExprBinary::Op::CompareLt:
{ {
const NormalizedType* leftTyNorm = normalizer.normalize(leftType); if (normLeft && normLeft->isExactlyNumber())
if (leftTyNorm && leftTyNorm->isExactlyNumber())
{ {
reportErrors(tryUnify(scope, expr->right->location, rightType, builtinTypes->numberType)); reportErrors(tryUnify(scope, expr->right->location, rightType, builtinTypes->numberType));
return builtinTypes->numberType; return builtinTypes->numberType;
} }
else if (leftTyNorm && leftTyNorm->isSubtypeOfString()) else if (normLeft && normLeft->isSubtypeOfString())
{ {
reportErrors(tryUnify(scope, expr->right->location, rightType, builtinTypes->stringType)); reportErrors(tryUnify(scope, expr->right->location, rightType, builtinTypes->stringType));
return builtinTypes->stringType; return builtinTypes->stringType;

View File

@ -35,7 +35,6 @@ LUAU_FASTFLAGVARIABLE(DebugLuauFreezeDuringUnification, false)
LUAU_FASTFLAGVARIABLE(DebugLuauSharedSelf, false) LUAU_FASTFLAGVARIABLE(DebugLuauSharedSelf, false)
LUAU_FASTFLAG(LuauInstantiateInSubtyping) LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAGVARIABLE(LuauAllowIndexClassParameters, false) LUAU_FASTFLAGVARIABLE(LuauAllowIndexClassParameters, false)
LUAU_FASTFLAG(LuauUninhabitedSubAnything2)
LUAU_FASTFLAG(LuauOccursIsntAlwaysFailure) LUAU_FASTFLAG(LuauOccursIsntAlwaysFailure)
LUAU_FASTFLAGVARIABLE(LuauTypecheckTypeguards, false) LUAU_FASTFLAGVARIABLE(LuauTypecheckTypeguards, false)
LUAU_FASTFLAGVARIABLE(LuauTinyControlFlowAnalysis, false) LUAU_FASTFLAGVARIABLE(LuauTinyControlFlowAnalysis, false)
@ -841,7 +840,7 @@ struct Demoter : Substitution
bool ignoreChildren(TypeId ty) override bool ignoreChildren(TypeId ty) override
{ {
if (FFlag::LuauClassTypeVarsInSubstitution && get<ClassType>(ty)) if (get<ClassType>(ty))
return true; return true;
return false; return false;
@ -2648,10 +2647,7 @@ static std::optional<bool> areEqComparable(NotNull<TypeArena> arena, NotNull<Nor
if (!n) if (!n)
return std::nullopt; return std::nullopt;
if (FFlag::LuauUninhabitedSubAnything2) return normalizer->isInhabited(n);
return normalizer->isInhabited(n);
else
return isInhabited_DEPRECATED(*n);
} }
TypeId TypeChecker::checkRelationalOperation( TypeId TypeChecker::checkRelationalOperation(

View File

@ -19,12 +19,10 @@
LUAU_FASTINT(LuauTypeInferTypePackLoopLimit) LUAU_FASTINT(LuauTypeInferTypePackLoopLimit)
LUAU_FASTFLAG(LuauErrorRecoveryType) LUAU_FASTFLAG(LuauErrorRecoveryType)
LUAU_FASTFLAGVARIABLE(LuauInstantiateInSubtyping, false) LUAU_FASTFLAGVARIABLE(LuauInstantiateInSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauUninhabitedSubAnything2, false)
LUAU_FASTFLAGVARIABLE(LuauVariadicAnyCanBeGeneric, false) LUAU_FASTFLAGVARIABLE(LuauVariadicAnyCanBeGeneric, false)
LUAU_FASTFLAGVARIABLE(LuauMaintainScopesInUnifier, false) LUAU_FASTFLAGVARIABLE(LuauMaintainScopesInUnifier, false)
LUAU_FASTFLAGVARIABLE(LuauTransitiveSubtyping, false) LUAU_FASTFLAGVARIABLE(LuauTransitiveSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauOccursIsntAlwaysFailure, false) LUAU_FASTFLAGVARIABLE(LuauOccursIsntAlwaysFailure, false)
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution) LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauNormalizeBlockedTypes) LUAU_FASTFLAG(LuauNormalizeBlockedTypes)
LUAU_FASTFLAG(LuauAlwaysCommitInferencesOfFunctionCalls) LUAU_FASTFLAG(LuauAlwaysCommitInferencesOfFunctionCalls)
@ -315,7 +313,7 @@ TypePackId Widen::clean(TypePackId)
bool Widen::ignoreChildren(TypeId ty) bool Widen::ignoreChildren(TypeId ty)
{ {
if (FFlag::LuauClassTypeVarsInSubstitution && get<ClassType>(ty)) if (get<ClassType>(ty))
return true; return true;
return !log->is<UnionType>(ty); return !log->is<UnionType>(ty);
@ -748,10 +746,9 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
else if (log.get<NegationType>(superTy) || log.get<NegationType>(subTy)) else if (log.get<NegationType>(superTy) || log.get<NegationType>(subTy))
tryUnifyNegations(subTy, superTy); tryUnifyNegations(subTy, superTy);
else if (FFlag::LuauUninhabitedSubAnything2 && checkInhabited && !normalizer->isInhabited(subTy)) else if (checkInhabited && !normalizer->isInhabited(subTy))
{ {
} }
else else
reportError(location, TypeMismatch{superTy, subTy, mismatchContext()}); reportError(location, TypeMismatch{superTy, subTy, mismatchContext()});
@ -2365,7 +2362,7 @@ void Unifier::tryUnifyScalarShape(TypeId subTy, TypeId superTy, bool reversed)
TypeId osubTy = subTy; TypeId osubTy = subTy;
TypeId osuperTy = superTy; TypeId osuperTy = superTy;
if (FFlag::LuauUninhabitedSubAnything2 && checkInhabited && !normalizer->isInhabited(subTy)) if (checkInhabited && !normalizer->isInhabited(subTy))
return; return;
if (reversed) if (reversed)
@ -2739,7 +2736,7 @@ void Unifier::tryUnifyVariadics(TypePackId subTp, TypePackId superTp, bool rever
} }
} }
} }
else if (FFlag::LuauVariadicAnyCanBeGeneric && get<AnyType>(variadicTy) && log.get<GenericTypePack>(subTp)) else if (get<AnyType>(variadicTy) && log.get<GenericTypePack>(subTp))
{ {
// Nothing to do. This is ok. // Nothing to do. This is ok.
} }
@ -2893,7 +2890,7 @@ bool Unifier::occursCheck(TypeId needle, TypeId haystack, bool reversed)
if (innerState.failure) if (innerState.failure)
{ {
reportError(location, OccursCheckFailed{}); reportError(location, OccursCheckFailed{});
log.replace(needle, *builtinTypes->errorRecoveryType()); log.replace(needle, BoundType{builtinTypes->errorRecoveryType()});
} }
} }

View File

@ -129,7 +129,7 @@ static double recordDeltaTime(double& timer)
return delta; return delta;
} }
static bool compileFile(const char* name, CompileFormat format, CompileStats& stats) static bool compileFile(const char* name, CompileFormat format, Luau::CodeGen::AssemblyOptions::Target assemblyTarget, CompileStats& stats)
{ {
double currts = Luau::TimeTrace::getClock(); double currts = Luau::TimeTrace::getClock();
@ -150,6 +150,7 @@ static bool compileFile(const char* name, CompileFormat format, CompileStats& st
Luau::BytecodeBuilder bcb; Luau::BytecodeBuilder bcb;
Luau::CodeGen::AssemblyOptions options; Luau::CodeGen::AssemblyOptions options;
options.target = assemblyTarget;
options.outputBinary = format == CompileFormat::CodegenNull; options.outputBinary = format == CompileFormat::CodegenNull;
if (!options.outputBinary) if (!options.outputBinary)
@ -248,6 +249,7 @@ static void displayHelp(const char* argv0)
printf(" -h, --help: Display this usage message.\n"); printf(" -h, --help: Display this usage message.\n");
printf(" -O<n>: compile with optimization level n (default 1, n should be between 0 and 2).\n"); printf(" -O<n>: compile with optimization level n (default 1, n should be between 0 and 2).\n");
printf(" -g<n>: compile with debug level n (default 1, n should be between 0 and 2).\n"); printf(" -g<n>: compile with debug level n (default 1, n should be between 0 and 2).\n");
printf(" --target=<target>: compile code for specific architecture (a64, x64, a64_nf, x64_ms).\n");
printf(" --timetrace: record compiler time tracing information into trace.json\n"); printf(" --timetrace: record compiler time tracing information into trace.json\n");
} }
@ -264,6 +266,7 @@ int main(int argc, char** argv)
setLuauFlagsDefault(); setLuauFlagsDefault();
CompileFormat compileFormat = CompileFormat::Text; CompileFormat compileFormat = CompileFormat::Text;
Luau::CodeGen::AssemblyOptions::Target assemblyTarget = Luau::CodeGen::AssemblyOptions::Host;
for (int i = 1; i < argc; i++) for (int i = 1; i < argc; i++)
{ {
@ -292,6 +295,24 @@ int main(int argc, char** argv)
} }
globalOptions.debugLevel = level; globalOptions.debugLevel = level;
} }
else if (strncmp(argv[i], "--target=", 9) == 0)
{
const char* value = argv[i] + 9;
if (strcmp(value, "a64") == 0)
assemblyTarget = Luau::CodeGen::AssemblyOptions::A64;
else if (strcmp(value, "a64_nf") == 0)
assemblyTarget = Luau::CodeGen::AssemblyOptions::A64_NoFeatures;
else if (strcmp(value, "x64") == 0)
assemblyTarget = Luau::CodeGen::AssemblyOptions::X64_SystemV;
else if (strcmp(value, "x64_ms") == 0)
assemblyTarget = Luau::CodeGen::AssemblyOptions::X64_Windows;
else
{
fprintf(stderr, "Error: unknown target\n");
return 1;
}
}
else if (strcmp(argv[i], "--timetrace") == 0) else if (strcmp(argv[i], "--timetrace") == 0)
{ {
FFlag::DebugLuauTimeTracing.value = true; FFlag::DebugLuauTimeTracing.value = true;
@ -331,7 +352,7 @@ int main(int argc, char** argv)
int failed = 0; int failed = 0;
for (const std::string& path : files) for (const std::string& path : files)
failed += !compileFile(path.c_str(), compileFormat, stats); failed += !compileFile(path.c_str(), compileFormat, assemblyTarget, stats);
if (compileFormat == CompileFormat::Null) if (compileFormat == CompileFormat::Null)
printf("Compiled %d KLOC into %d KB bytecode (read %.2fs, parse %.2fs, compile %.2fs)\n", int(stats.lines / 1000), int(stats.bytecode / 1024), printf("Compiled %d KLOC into %d KB bytecode (read %.2fs, parse %.2fs, compile %.2fs)\n", int(stats.lines / 1000), int(stats.bytecode / 1024),

View File

@ -23,6 +23,17 @@ using AnnotatorFn = void (*)(void* context, std::string& result, int fid, int in
struct AssemblyOptions struct AssemblyOptions
{ {
enum Target
{
Host,
A64,
A64_NoFeatures,
X64_Windows,
X64_SystemV,
};
Target target = Host;
bool outputBinary = false; bool outputBinary = false;
bool includeAssembly = false; bool includeAssembly = false;

View File

@ -414,6 +414,7 @@ enum class IrCmd : uint8_t
// Handle GC write barrier (forward) // Handle GC write barrier (forward)
// A: pointer (GCObject) // A: pointer (GCObject)
// B: Rn (TValue that was written to the object) // B: Rn (TValue that was written to the object)
// C: tag/undef (tag of the value that was written)
BARRIER_OBJ, BARRIER_OBJ,
// Handle GC write barrier (backwards) for a write into a table // Handle GC write barrier (backwards) for a write into a table
@ -423,6 +424,7 @@ enum class IrCmd : uint8_t
// Handle GC write barrier (forward) for a write into a table // Handle GC write barrier (forward) for a write into a table
// A: pointer (Table) // A: pointer (Table)
// B: Rn (TValue that was written to the object) // B: Rn (TValue that was written to the object)
// C: tag/undef (tag of the value that was written)
BARRIER_TABLE_FORWARD, BARRIER_TABLE_FORWARD,
// Update savedpc value // Update savedpc value
@ -584,6 +586,14 @@ enum class IrCmd : uint8_t
// B: double // B: double
// C: double/int (optional, 2nd argument) // C: double/int (optional, 2nd argument)
INVOKE_LIBM, INVOKE_LIBM,
// Returns the string name of a type based on tag, alternative for type(x)
// A: tag
GET_TYPE,
// Returns the string name of a type either from a __type metatable field or just based on the tag, alternative for typeof(x)
// A: Rn
GET_TYPEOF,
}; };
enum class IrConstKind : uint8_t enum class IrConstKind : uint8_t

View File

@ -189,6 +189,8 @@ inline bool hasResult(IrCmd cmd)
case IrCmd::BITCOUNTLZ_UINT: case IrCmd::BITCOUNTLZ_UINT:
case IrCmd::BITCOUNTRZ_UINT: case IrCmd::BITCOUNTRZ_UINT:
case IrCmd::INVOKE_LIBM: case IrCmd::INVOKE_LIBM:
case IrCmd::GET_TYPE:
case IrCmd::GET_TYPEOF:
return true; return true;
default: default:
break; break;

View File

@ -1,15 +1,12 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/CodeGen.h" #include "Luau/CodeGen.h"
#include "CodeGenLower.h"
#include "Luau/Common.h" #include "Luau/Common.h"
#include "Luau/CodeAllocator.h" #include "Luau/CodeAllocator.h"
#include "Luau/CodeBlockUnwind.h" #include "Luau/CodeBlockUnwind.h"
#include "Luau/IrAnalysis.h"
#include "Luau/IrBuilder.h" #include "Luau/IrBuilder.h"
#include "Luau/IrDump.h"
#include "Luau/IrUtils.h"
#include "Luau/OptimizeConstProp.h"
#include "Luau/OptimizeFinalX64.h"
#include "Luau/UnwindBuilder.h" #include "Luau/UnwindBuilder.h"
#include "Luau/UnwindBuilderDwarf2.h" #include "Luau/UnwindBuilderDwarf2.h"
@ -21,17 +18,10 @@
#include "NativeState.h" #include "NativeState.h"
#include "CodeGenA64.h" #include "CodeGenA64.h"
#include "EmitCommonA64.h"
#include "IrLoweringA64.h"
#include "CodeGenX64.h" #include "CodeGenX64.h"
#include "EmitCommonX64.h"
#include "EmitInstructionX64.h"
#include "IrLoweringX64.h"
#include "lapi.h" #include "lapi.h"
#include <algorithm>
#include <memory> #include <memory>
#include <optional> #include <optional>
@ -107,238 +97,14 @@ static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
gPerfLogFn(gPerfLogContext, addr, size, name); gPerfLogFn(gPerfLogContext, addr, size, name);
} }
template<typename AssemblyBuilder, typename IrLowering>
static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, int bytecodeid, AssemblyOptions options)
{
// While we will need a better block ordering in the future, right now we want to mostly preserve build order with fallbacks outlined
std::vector<uint32_t> sortedBlocks;
sortedBlocks.reserve(function.blocks.size());
for (uint32_t i = 0; i < function.blocks.size(); i++)
sortedBlocks.push_back(i);
std::sort(sortedBlocks.begin(), sortedBlocks.end(), [&](uint32_t idxA, uint32_t idxB) {
const IrBlock& a = function.blocks[idxA];
const IrBlock& b = function.blocks[idxB];
// Place fallback blocks at the end
if ((a.kind == IrBlockKind::Fallback) != (b.kind == IrBlockKind::Fallback))
return (a.kind == IrBlockKind::Fallback) < (b.kind == IrBlockKind::Fallback);
// Try to order by instruction order
return a.sortkey < b.sortkey;
});
// For each IR instruction that begins a bytecode instruction, which bytecode instruction is it?
std::vector<uint32_t> bcLocations(function.instructions.size() + 1, ~0u);
for (size_t i = 0; i < function.bcMapping.size(); ++i)
{
uint32_t irLocation = function.bcMapping[i].irLocation;
if (irLocation != ~0u)
bcLocations[irLocation] = uint32_t(i);
}
bool outputEnabled = options.includeAssembly || options.includeIr;
IrToStringContext ctx{build.text, function.blocks, function.constants, function.cfg};
// We use this to skip outlined fallback blocks from IR/asm text output
size_t textSize = build.text.length();
uint32_t codeSize = build.getCodeSize();
bool seenFallback = false;
IrBlock dummy;
dummy.start = ~0u;
for (size_t i = 0; i < sortedBlocks.size(); ++i)
{
uint32_t blockIndex = sortedBlocks[i];
IrBlock& block = function.blocks[blockIndex];
if (block.kind == IrBlockKind::Dead)
continue;
LUAU_ASSERT(block.start != ~0u);
LUAU_ASSERT(block.finish != ~0u);
// If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
if (block.kind == IrBlockKind::Fallback && !seenFallback)
{
textSize = build.text.length();
codeSize = build.getCodeSize();
seenFallback = true;
}
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, /* includeUseInfo */ true);
}
// Values can only reference restore operands in the current block
function.validRestoreOpBlockIdx = blockIndex;
build.setLabel(block.label);
for (uint32_t index = block.start; index <= block.finish; index++)
{
LUAU_ASSERT(index < function.instructions.size());
uint32_t bcLocation = bcLocations[index];
// If IR instruction is the first one for the original bytecode, we can annotate it with source code text
if (outputEnabled && options.annotator && bcLocation != ~0u)
{
options.annotator(options.annotatorContext, build.text, bytecodeid, bcLocation);
}
// If bytecode needs the location of this instruction for jumps, record it
if (bcLocation != ~0u)
{
Label label = (index == block.start) ? block.label : build.setLabel();
function.bcMapping[bcLocation].asmLocation = build.getLabelOffset(label);
}
IrInst& inst = function.instructions[index];
// Skip pseudo instructions, but make sure they are not used at this stage
// This also prevents them from getting into text output when that's enabled
if (isPseudo(inst.cmd))
{
LUAU_ASSERT(inst.useCount == 0);
continue;
}
// Either instruction result value is not referenced or the use count is not zero
LUAU_ASSERT(inst.lastUse == 0 || inst.useCount != 0);
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, inst, index, /* includeUseInfo */ true);
}
IrBlock& next = i + 1 < sortedBlocks.size() ? function.blocks[sortedBlocks[i + 1]] : dummy;
lowering.lowerInst(inst, index, next);
if (lowering.hasError())
{
// Place labels for all blocks that we're skipping
// This is needed to avoid AssemblyBuilder assertions about jumps in earlier blocks with unplaced labels
for (size_t j = i + 1; j < sortedBlocks.size(); ++j)
{
IrBlock& abandoned = function.blocks[sortedBlocks[j]];
build.setLabel(abandoned.label);
}
lowering.finishFunction();
return false;
}
}
lowering.finishBlock();
if (options.includeIr)
build.logAppend("#\n");
}
if (!seenFallback)
{
textSize = build.text.length();
codeSize = build.getCodeSize();
}
lowering.finishFunction();
if (outputEnabled && !options.includeOutlinedCode && textSize < build.text.size())
{
build.text.resize(textSize);
if (options.includeAssembly)
build.logAppend("; skipping %u bytes of outlined code\n", unsigned((build.getCodeSize() - codeSize) * sizeof(build.code[0])));
}
return true;
}
[[maybe_unused]] static bool lowerIr(
X64::AssemblyBuilderX64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
optimizeMemoryOperandsX64(ir.function);
X64::IrLoweringX64 lowering(build, helpers, data, ir.function);
return lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
}
[[maybe_unused]] static bool lowerIr(
A64::AssemblyBuilderA64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
A64::IrLoweringA64 lowering(build, helpers, data, ir.function);
return lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
}
template<typename AssemblyBuilder> template<typename AssemblyBuilder>
static std::optional<NativeProto> assembleFunction(AssemblyBuilder& build, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options) static std::optional<NativeProto> createNativeFunction(AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto)
{ {
if (options.includeAssembly || options.includeIr)
{
if (proto->debugname)
build.logAppend("; function %s(", getstr(proto->debugname));
else
build.logAppend("; function(");
for (int i = 0; i < proto->numparams; i++)
{
LocVar* var = proto->locvars ? &proto->locvars[proto->sizelocvars - proto->numparams + i] : nullptr;
if (var && var->varname)
build.logAppend("%s%s", i == 0 ? "" : ", ", getstr(var->varname));
else
build.logAppend("%s$arg%d", i == 0 ? "" : ", ", i);
}
if (proto->numparams != 0 && proto->is_vararg)
build.logAppend(", ...)");
else
build.logAppend(")");
if (proto->linedefined >= 0)
build.logAppend(" line %d\n", proto->linedefined);
else
build.logAppend("\n");
}
IrBuilder ir; IrBuilder ir;
ir.buildFunctionIr(proto); ir.buildFunctionIr(proto);
computeCfgInfo(ir.function); if (!lowerFunction(ir, build, helpers, proto, {}))
if (!FFlag::DebugCodegenNoOpt)
{
bool useValueNumbering = !FFlag::DebugCodegenSkipNumbering;
constPropInBlockChains(ir, useValueNumbering);
if (!FFlag::DebugCodegenOptSize)
createLinearBlocks(ir, useValueNumbering);
}
if (!lowerIr(build, ir, data, helpers, proto, options))
{
if (build.logText)
build.logAppend("; skipping (can't lower)\n\n");
return std::nullopt; return std::nullopt;
}
if (build.logText)
build.logAppend("\n");
return createNativeProto(proto, ir); return createNativeProto(proto, ir);
} }
@ -384,7 +150,7 @@ static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
} }
#if defined(__aarch64__) #if defined(__aarch64__)
static unsigned int getCpuFeaturesA64() unsigned int getCpuFeaturesA64()
{ {
unsigned int result = 0; unsigned int result = 0;
@ -482,21 +248,6 @@ void create(lua_State* L)
ecb->setbreakpoint = onSetBreakpoint; ecb->setbreakpoint = onSetBreakpoint;
} }
static void gatherFunctions(std::vector<Proto*>& results, Proto* proto)
{
if (results.size() <= size_t(proto->bytecodeid))
results.resize(proto->bytecodeid + 1);
// Skip protos that we've already compiled in this run: this happens because at -O2, inlined functions get their protos reused
if (results[proto->bytecodeid])
return;
results[proto->bytecodeid] = proto;
for (int i = 0; i < proto->sizep; i++)
gatherFunctions(results, proto->p[i]);
}
void compile(lua_State* L, int idx) void compile(lua_State* L, int idx)
{ {
LUAU_ASSERT(lua_isLfunction(L, idx)); LUAU_ASSERT(lua_isLfunction(L, idx));
@ -529,7 +280,7 @@ void compile(lua_State* L, int idx)
// Skip protos that have been compiled during previous invocations of CodeGen::compile // Skip protos that have been compiled during previous invocations of CodeGen::compile
for (Proto* p : protos) for (Proto* p : protos)
if (p && p->execdata == nullptr) if (p && p->execdata == nullptr)
if (std::optional<NativeProto> np = assembleFunction(build, *data, helpers, p, {})) if (std::optional<NativeProto> np = createNativeFunction(build, helpers, p))
results.push_back(*np); results.push_back(*np);
// Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module // Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module
@ -580,51 +331,6 @@ void compile(lua_State* L, int idx)
} }
} }
std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
{
LUAU_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
#if defined(__aarch64__)
A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, getCpuFeaturesA64());
#else
X64::AssemblyBuilderX64 build(/* logText= */ options.includeAssembly);
#endif
NativeState data;
initFunctions(data);
std::vector<Proto*> protos;
gatherFunctions(protos, clvalue(func)->l.p);
ModuleHelpers helpers;
#if defined(__aarch64__)
A64::assembleHelpers(build, helpers);
#else
X64::assembleHelpers(build, helpers);
#endif
if (!options.includeOutlinedCode && options.includeAssembly)
{
build.text.clear();
build.logAppend("; skipping %u bytes of outlined helpers\n", unsigned(build.getCodeSize() * sizeof(build.code[0])));
}
for (Proto* p : protos)
if (p)
if (std::optional<NativeProto> np = assembleFunction(build, data, helpers, p, options))
destroyExecData(np->execdata);
if (!build.finalize())
return std::string();
if (options.outputBinary)
return std::string(reinterpret_cast<const char*>(build.code.data()), reinterpret_cast<const char*>(build.code.data() + build.code.size())) +
std::string(build.data.begin(), build.data.end());
else
return build.text;
}
void setPerfLog(void* context, PerfLogFn logFn) void setPerfLog(void* context, PerfLogFn logFn)
{ {
gPerfLogContext = context; gPerfLogContext = context;

View File

@ -0,0 +1,146 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/CodeGen.h"
#include "CodeGenLower.h"
#include "CodeGenA64.h"
#include "CodeGenX64.h"
#include "lapi.h"
namespace Luau
{
namespace CodeGen
{
template<typename AssemblyBuilder>
static void logFunctionHeader(AssemblyBuilder& build, Proto* proto)
{
if (proto->debugname)
build.logAppend("; function %s(", getstr(proto->debugname));
else
build.logAppend("; function(");
for (int i = 0; i < proto->numparams; i++)
{
LocVar* var = proto->locvars ? &proto->locvars[proto->sizelocvars - proto->numparams + i] : nullptr;
if (var && var->varname)
build.logAppend("%s%s", i == 0 ? "" : ", ", getstr(var->varname));
else
build.logAppend("%s$arg%d", i == 0 ? "" : ", ", i);
}
if (proto->numparams != 0 && proto->is_vararg)
build.logAppend(", ...)");
else
build.logAppend(")");
if (proto->linedefined >= 0)
build.logAppend(" line %d\n", proto->linedefined);
else
build.logAppend("\n");
}
template<typename AssemblyBuilder>
static std::string getAssemblyImpl(AssemblyBuilder& build, const TValue* func, AssemblyOptions options)
{
std::vector<Proto*> protos;
gatherFunctions(protos, clvalue(func)->l.p);
ModuleHelpers helpers;
assembleHelpers(build, helpers);
if (!options.includeOutlinedCode && options.includeAssembly)
{
build.text.clear();
build.logAppend("; skipping %u bytes of outlined helpers\n", unsigned(build.getCodeSize() * sizeof(build.code[0])));
}
for (Proto* p : protos)
if (p)
{
IrBuilder ir;
ir.buildFunctionIr(p);
if (options.includeAssembly || options.includeIr)
logFunctionHeader(build, p);
if (!lowerFunction(ir, build, helpers, p, options))
{
if (build.logText)
build.logAppend("; skipping (can't lower)\n");
}
if (build.logText)
build.logAppend("\n");
}
if (!build.finalize())
return std::string();
if (options.outputBinary)
return std::string(reinterpret_cast<const char*>(build.code.data()), reinterpret_cast<const char*>(build.code.data() + build.code.size())) +
std::string(build.data.begin(), build.data.end());
else
return build.text;
}
#if defined(__aarch64__)
unsigned int getCpuFeaturesA64();
#endif
// Produces an assembly/IR listing (or raw binary, per options) for the Luau function
// at stack slot 'idx', targeting either the host architecture or the explicitly
// requested one from options.target.
std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
{
    LUAU_ASSERT(lua_isLfunction(L, idx));
    const TValue* func = luaA_toobject(L, idx);

    if (options.target == AssemblyOptions::Host)
    {
#if defined(__aarch64__)
        A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, getCpuFeaturesA64());
#else
        X64::AssemblyBuilderX64 build(/* logText= */ options.includeAssembly);
#endif
        return getAssemblyImpl(build, func, options);
    }
    else if (options.target == AssemblyOptions::A64)
    {
        // Cross-target A64 assumes a modern core with JSCVT available
        A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, /* features= */ A64::Feature_JSCVT);
        return getAssemblyImpl(build, func, options);
    }
    else if (options.target == AssemblyOptions::A64_NoFeatures)
    {
        A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, /* features= */ 0);
        return getAssemblyImpl(build, func, options);
    }
    else if (options.target == AssemblyOptions::X64_Windows)
    {
        X64::AssemblyBuilderX64 build(/* logText= */ options.includeAssembly, X64::ABIX64::Windows);
        return getAssemblyImpl(build, func, options);
    }
    else if (options.target == AssemblyOptions::X64_SystemV)
    {
        X64::AssemblyBuilderX64 build(/* logText= */ options.includeAssembly, X64::ABIX64::SystemV);
        return getAssemblyImpl(build, func, options);
    }
    else
    {
        LUAU_ASSERT(!"Unknown target");
        return std::string();
    }
}
} // namespace CodeGen
} // namespace Luau

240
CodeGen/src/CodeGenLower.h Normal file
View File

@ -0,0 +1,240 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/CodeGen.h"
#include "Luau/IrBuilder.h"
#include "Luau/IrDump.h"
#include "Luau/IrUtils.h"
#include "Luau/OptimizeConstProp.h"
#include "Luau/OptimizeFinalX64.h"
#include "EmitCommon.h"
#include "IrLoweringA64.h"
#include "IrLoweringX64.h"
#include "lobject.h"
#include "lstate.h"
#include <algorithm>
#include <vector>
LUAU_FASTFLAG(DebugCodegenNoOpt)
LUAU_FASTFLAG(DebugCodegenOptSize)
LUAU_FASTFLAG(DebugCodegenSkipNumbering)
namespace Luau
{
namespace CodeGen
{
// Recursively collects 'proto' and all nested protos into 'results', keyed by bytecodeid.
inline void gatherFunctions(std::vector<Proto*>& results, Proto* proto)
{
    size_t slot = size_t(proto->bytecodeid);

    if (results.size() <= slot)
        results.resize(slot + 1);

    // Skip protos that we've already compiled in this run: this happens because at -O2, inlined functions get their protos reused
    if (results[slot] != nullptr)
        return;

    results[slot] = proto;

    for (int child = 0; child < proto->sizep; child++)
        gatherFunctions(results, proto->p[child]);
}
// Lowers every live block of 'function' to machine code through 'lowering', optionally
// emitting IR/assembly text into the build log. Returns false if any instruction fails
// to lower (all remaining labels are still placed so the builder stays consistent).
template<typename AssemblyBuilder, typename IrLowering>
inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, int bytecodeid, AssemblyOptions options)
{
    // While we will need a better block ordering in the future, right now we want to mostly preserve build order with fallbacks outlined
    std::vector<uint32_t> sortedBlocks;
    sortedBlocks.reserve(function.blocks.size());
    for (uint32_t i = 0; i < function.blocks.size(); i++)
        sortedBlocks.push_back(i);
    std::sort(sortedBlocks.begin(), sortedBlocks.end(), [&](uint32_t idxA, uint32_t idxB) {
        const IrBlock& a = function.blocks[idxA];
        const IrBlock& b = function.blocks[idxB];
        // Place fallback blocks at the end
        if ((a.kind == IrBlockKind::Fallback) != (b.kind == IrBlockKind::Fallback))
            return (a.kind == IrBlockKind::Fallback) < (b.kind == IrBlockKind::Fallback);
        // Try to order by instruction order
        return a.sortkey < b.sortkey;
    });
    // For each IR instruction that begins a bytecode instruction, which bytecode instruction is it?
    // ~0u marks IR instructions that don't start a bytecode instruction
    std::vector<uint32_t> bcLocations(function.instructions.size() + 1, ~0u);
    for (size_t i = 0; i < function.bcMapping.size(); ++i)
    {
        uint32_t irLocation = function.bcMapping[i].irLocation;
        if (irLocation != ~0u)
            bcLocations[irLocation] = uint32_t(i);
    }
    bool outputEnabled = options.includeAssembly || options.includeIr;
    IrToStringContext ctx{build.text, function.blocks, function.constants, function.cfg};
    // We use this to skip outlined fallback blocks from IR/asm text output
    size_t textSize = build.text.length();
    uint32_t codeSize = build.getCodeSize();
    bool seenFallback = false;
    // 'dummy' stands in as the "next block" when lowering the final sorted block
    IrBlock dummy;
    dummy.start = ~0u;
    for (size_t i = 0; i < sortedBlocks.size(); ++i)
    {
        uint32_t blockIndex = sortedBlocks[i];
        IrBlock& block = function.blocks[blockIndex];
        if (block.kind == IrBlockKind::Dead)
            continue;
        LUAU_ASSERT(block.start != ~0u);
        LUAU_ASSERT(block.finish != ~0u);
        // If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
        // (fallbacks are sorted to the end, so everything from here on is fallback code)
        if (block.kind == IrBlockKind::Fallback && !seenFallback)
        {
            textSize = build.text.length();
            codeSize = build.getCodeSize();
            seenFallback = true;
        }
        if (options.includeIr)
        {
            build.logAppend("# ");
            toStringDetailed(ctx, block, blockIndex, /* includeUseInfo */ true);
        }
        // Values can only reference restore operands in the current block
        function.validRestoreOpBlockIdx = blockIndex;
        build.setLabel(block.label);
        for (uint32_t index = block.start; index <= block.finish; index++)
        {
            LUAU_ASSERT(index < function.instructions.size());
            uint32_t bcLocation = bcLocations[index];
            // If IR instruction is the first one for the original bytecode, we can annotate it with source code text
            if (outputEnabled && options.annotator && bcLocation != ~0u)
            {
                options.annotator(options.annotatorContext, build.text, bytecodeid, bcLocation);
            }
            // If bytecode needs the location of this instruction for jumps, record it
            if (bcLocation != ~0u)
            {
                Label label = (index == block.start) ? block.label : build.setLabel();
                function.bcMapping[bcLocation].asmLocation = build.getLabelOffset(label);
            }
            IrInst& inst = function.instructions[index];
            // Skip pseudo instructions, but make sure they are not used at this stage
            // This also prevents them from getting into text output when that's enabled
            if (isPseudo(inst.cmd))
            {
                LUAU_ASSERT(inst.useCount == 0);
                continue;
            }
            // Either instruction result value is not referenced or the use count is not zero
            LUAU_ASSERT(inst.lastUse == 0 || inst.useCount != 0);
            if (options.includeIr)
            {
                build.logAppend("# ");
                toStringDetailed(ctx, block, blockIndex, inst, index, /* includeUseInfo */ true);
            }
            IrBlock& next = i + 1 < sortedBlocks.size() ? function.blocks[sortedBlocks[i + 1]] : dummy;
            lowering.lowerInst(inst, index, next);
            if (lowering.hasError())
            {
                // Place labels for all blocks that we're skipping
                // This is needed to avoid AssemblyBuilder assertions about jumps in earlier blocks with unplaced labels
                for (size_t j = i + 1; j < sortedBlocks.size(); ++j)
                {
                    IrBlock& abandoned = function.blocks[sortedBlocks[j]];
                    build.setLabel(abandoned.label);
                }
                lowering.finishFunction();
                return false;
            }
        }
        lowering.finishBlock();
        if (options.includeIr)
            build.logAppend("#\n");
    }
    // No fallback blocks at all: everything up to the current position is "real" output
    if (!seenFallback)
    {
        textSize = build.text.length();
        codeSize = build.getCodeSize();
    }
    lowering.finishFunction();
    // Trim the outlined-code text we recorded earlier, replacing it with a size note
    if (outputEnabled && !options.includeOutlinedCode && textSize < build.text.size())
    {
        build.text.resize(textSize);
        if (options.includeAssembly)
            build.logAppend("; skipping %u bytes of outlined code\n", unsigned((build.getCodeSize() - codeSize) * sizeof(build.code[0])));
    }
    return true;
}
// x64 lowering entry point: runs the x64-specific memory operand pass before the shared lowering loop.
inline bool lowerIr(X64::AssemblyBuilderX64& build, IrBuilder& ir, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
    IrFunction& function = ir.function;

    optimizeMemoryOperandsX64(function);

    X64::IrLoweringX64 lowering(build, helpers, function);
    return lowerImpl(build, lowering, function, proto->bytecodeid, options);
}
// A64 lowering entry point: feeds the IR straight into the shared lowering loop.
inline bool lowerIr(A64::AssemblyBuilderA64& build, IrBuilder& ir, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
    IrFunction& function = ir.function;

    A64::IrLoweringA64 lowering(build, helpers, function);
    return lowerImpl(build, lowering, function, proto->bytecodeid, options);
}
// Runs CFG analysis and (unless disabled by debug flags) the IR optimization passes,
// then lowers the function to machine code for the builder's architecture.
// Returns false if lowering failed.
template<typename AssemblyBuilder>
inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
    computeCfgInfo(ir.function);

    const bool optimize = !FFlag::DebugCodegenNoOpt;

    if (optimize)
    {
        const bool useValueNumbering = !FFlag::DebugCodegenSkipNumbering;

        constPropInBlockChains(ir, useValueNumbering);

        // Block linearization trades code size for speed, so it's skipped when optimizing for size
        if (!FFlag::DebugCodegenOptSize)
            createLinearBlocks(ir, useValueNumbering);
    }

    return lowerIr(build, ir, helpers, proto, options);
}
} // namespace CodeGen
} // namespace Luau

View File

@ -75,29 +75,6 @@ static void emitBuiltinMathSign(IrRegAllocX64& regs, AssemblyBuilderX64& build,
build.vmovsd(luauRegValue(ra), tmp0.reg); build.vmovsd(luauRegValue(ra), tmp0.reg);
} }
static void emitBuiltinType(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg)
{
ScopedRegX64 tmp0{regs, SizeX64::qword};
ScopedRegX64 tag{regs, SizeX64::dword};
build.mov(tag.reg, luauRegTag(arg));
build.mov(tmp0.reg, qword[rState + offsetof(lua_State, global)]);
build.mov(tmp0.reg, qword[tmp0.reg + qwordReg(tag.reg) * sizeof(TString*) + offsetof(global_State, ttname)]);
build.mov(luauRegValue(ra), tmp0.reg);
}
static void emitBuiltinTypeof(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(arg));
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaT_objtypenamestr)]);
build.mov(luauRegValue(ra), rax);
}
void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int ra, int arg, OperandX64 arg2, int nparams, int nresults) void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int ra, int arg, OperandX64 arg2, int nparams, int nresults)
{ {
switch (bfid) switch (bfid)
@ -111,12 +88,6 @@ void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int r
case LBF_MATH_SIGN: case LBF_MATH_SIGN:
LUAU_ASSERT(nparams == 1 && nresults == 1); LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathSign(regs, build, ra, arg); return emitBuiltinMathSign(regs, build, ra, arg);
case LBF_TYPE:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinType(regs, build, ra, arg);
case LBF_TYPEOF:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinTypeof(regs, build, ra, arg);
default: default:
LUAU_ASSERT(!"Missing x64 lowering"); LUAU_ASSERT(!"Missing x64 lowering");
} }

View File

@ -5,6 +5,7 @@
#include "Luau/IrCallWrapperX64.h" #include "Luau/IrCallWrapperX64.h"
#include "Luau/IrData.h" #include "Luau/IrData.h"
#include "Luau/IrRegAllocX64.h" #include "Luau/IrRegAllocX64.h"
#include "Luau/IrUtils.h"
#include "NativeState.h" #include "NativeState.h"
@ -179,11 +180,15 @@ void callSetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, Operan
emitUpdateBase(build); emitUpdateBase(build);
} }
void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, Label& skip) void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, int ratag, Label& skip)
{ {
// iscollectable(ra) // Barrier should've been optimized away if we know that it's not collectable, checking for correctness
build.cmp(luauRegTag(ra), LUA_TSTRING); if (ratag == -1 || !isGCO(ratag))
build.jcc(ConditionX64::Less, skip); {
// iscollectable(ra)
build.cmp(luauRegTag(ra), LUA_TSTRING);
build.jcc(ConditionX64::Less, skip);
}
// isblack(obj2gco(o)) // isblack(obj2gco(o))
build.test(byte[object + offsetof(GCheader, marked)], bitmask(BLACKBIT)); build.test(byte[object + offsetof(GCheader, marked)], bitmask(BLACKBIT));
@ -195,12 +200,12 @@ void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, Re
build.jcc(ConditionX64::Zero, skip); build.jcc(ConditionX64::Zero, skip);
} }
void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra) void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra, int ratag)
{ {
Label skip; Label skip;
ScopedRegX64 tmp{regs, SizeX64::qword}; ScopedRegX64 tmp{regs, SizeX64::qword};
checkObjectBarrierConditions(build, tmp.reg, object, ra, skip); checkObjectBarrierConditions(build, tmp.reg, object, ra, ratag, skip);
{ {
ScopedSpills spillGuard(regs); ScopedSpills spillGuard(regs);

View File

@ -170,8 +170,8 @@ void callLengthHelper(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, in
void callPrepareForN(IrRegAllocX64& regs, AssemblyBuilderX64& build, int limit, int step, int init); void callPrepareForN(IrRegAllocX64& regs, AssemblyBuilderX64& build, int limit, int step, int init);
void callGetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra); void callGetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra);
void callSetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra); void callSetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra);
void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, Label& skip); void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, int ratag, Label& skip);
void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra); void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra, int ratag);
void callBarrierTableFast(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 table, IrOp tableOp); void callBarrierTableFast(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 table, IrOp tableOp);
void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build); void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build);

View File

@ -444,6 +444,9 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
case IrCmd::ADJUST_STACK_TO_TOP: case IrCmd::ADJUST_STACK_TO_TOP:
// While this can be considered to be a vararg consumer, it is already handled in fastcall instructions // While this can be considered to be a vararg consumer, it is already handled in fastcall instructions
break; break;
case IrCmd::GET_TYPEOF:
use(inst.a);
break;
default: default:
// All instructions which reference registers have to be handled explicitly // All instructions which reference registers have to be handled explicitly

View File

@ -297,6 +297,10 @@ const char* getCmdName(IrCmd cmd)
return "BITCOUNTRZ_UINT"; return "BITCOUNTRZ_UINT";
case IrCmd::INVOKE_LIBM: case IrCmd::INVOKE_LIBM:
return "INVOKE_LIBM"; return "INVOKE_LIBM";
case IrCmd::GET_TYPE:
return "GET_TYPE";
case IrCmd::GET_TYPEOF:
return "GET_TYPEOF";
} }
LUAU_UNREACHABLE(); LUAU_UNREACHABLE();

View File

@ -60,14 +60,18 @@ inline ConditionA64 getConditionFP(IrCondition cond)
} }
} }
static void checkObjectBarrierConditions(AssemblyBuilderA64& build, RegisterA64 object, RegisterA64 temp, int ra, Label& skip) static void checkObjectBarrierConditions(AssemblyBuilderA64& build, RegisterA64 object, RegisterA64 temp, int ra, int ratag, Label& skip)
{ {
RegisterA64 tempw = castReg(KindA64::w, temp); RegisterA64 tempw = castReg(KindA64::w, temp);
// iscollectable(ra) // Barrier should've been optimized away if we know that it's not collectable, checking for correctness
build.ldr(tempw, mem(rBase, ra * sizeof(TValue) + offsetof(TValue, tt))); if (ratag == -1 || !isGCO(ratag))
build.cmp(tempw, LUA_TSTRING); {
build.b(ConditionA64::Less, skip); // iscollectable(ra)
build.ldr(tempw, mem(rBase, ra * sizeof(TValue) + offsetof(TValue, tt)));
build.cmp(tempw, LUA_TSTRING);
build.b(ConditionA64::Less, skip);
}
// isblack(obj2gco(o)) // isblack(obj2gco(o))
build.ldrb(tempw, mem(object, offsetof(GCheader, marked))); build.ldrb(tempw, mem(object, offsetof(GCheader, marked)));
@ -162,33 +166,15 @@ static bool emitBuiltin(
build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n))); build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
return true; return true;
case LBF_TYPE:
build.ldr(w0, mem(rBase, arg * sizeof(TValue) + offsetof(TValue, tt)));
build.ldr(x1, mem(rState, offsetof(lua_State, global)));
LUAU_ASSERT(sizeof(TString*) == 8);
build.add(x1, x1, zextReg(w0), 3);
build.ldr(x0, mem(x1, offsetof(global_State, ttname)));
build.str(x0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.gc)));
return true;
case LBF_TYPEOF:
build.mov(x0, rState);
build.add(x1, rBase, uint16_t(arg * sizeof(TValue)));
build.ldr(x2, mem(rNativeContext, offsetof(NativeContext, luaT_objtypenamestr)));
build.blr(x2);
build.str(x0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.gc)));
return true;
default: default:
LUAU_ASSERT(!"Missing A64 lowering"); LUAU_ASSERT(!"Missing A64 lowering");
return false; return false;
} }
} }
IrLoweringA64::IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function) IrLoweringA64::IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, IrFunction& function)
: build(build) : build(build)
, helpers(helpers) , helpers(helpers)
, data(data)
, function(function) , function(function)
, regs(function, {{x0, x15}, {x16, x17}, {q0, q7}, {q16, q31}}) , regs(function, {{x0, x15}, {x16, x17}, {q0, q7}, {q16, q31}})
, valueTracker(function) , valueTracker(function)
@ -1004,7 +990,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.str(temp3, temp2); build.str(temp3, temp2);
Label skip; Label skip;
checkObjectBarrierConditions(build, temp1, temp2, vmRegOp(inst.b), skip); checkObjectBarrierConditions(build, temp1, temp2, vmRegOp(inst.b), /* ratag */ -1, skip);
size_t spills = regs.spill(build, index, {temp1}); size_t spills = regs.spill(build, index, {temp1});
@ -1210,7 +1196,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
RegisterA64 temp = regs.allocTemp(KindA64::x); RegisterA64 temp = regs.allocTemp(KindA64::x);
Label skip; Label skip;
checkObjectBarrierConditions(build, regOp(inst.a), temp, vmRegOp(inst.b), skip); checkObjectBarrierConditions(build, regOp(inst.a), temp, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
RegisterA64 reg = regOp(inst.a); // note: we need to call regOp before spill so that we don't do redundant reloads RegisterA64 reg = regOp(inst.a); // note: we need to call regOp before spill so that we don't do redundant reloads
size_t spills = regs.spill(build, index, {reg}); size_t spills = regs.spill(build, index, {reg});
@ -1254,7 +1240,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
RegisterA64 temp = regs.allocTemp(KindA64::x); RegisterA64 temp = regs.allocTemp(KindA64::x);
Label skip; Label skip;
checkObjectBarrierConditions(build, regOp(inst.a), temp, vmRegOp(inst.b), skip); checkObjectBarrierConditions(build, regOp(inst.a), temp, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
RegisterA64 reg = regOp(inst.a); // note: we need to call regOp before spill so that we don't do redundant reloads RegisterA64 reg = regOp(inst.a); // note: we need to call regOp before spill so that we don't do redundant reloads
size_t spills = regs.spill(build, index, {reg}); size_t spills = regs.spill(build, index, {reg});
@ -1710,6 +1696,34 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
inst.regA64 = regs.takeReg(d0, index); inst.regA64 = regs.takeReg(d0, index);
break; break;
} }
case IrCmd::GET_TYPE:
{
inst.regA64 = regs.allocReg(KindA64::x, index);
build.ldr(inst.regA64, mem(rState, offsetof(lua_State, global)));
LUAU_ASSERT(sizeof(TString*) == 8);
if (inst.a.kind == IrOpKind::Inst)
build.add(inst.regA64, inst.regA64, zextReg(regOp(inst.a)), 3);
else if (inst.a.kind == IrOpKind::Constant)
build.add(inst.regA64, inst.regA64, uint16_t(tagOp(inst.a)) * 8);
else
LUAU_ASSERT(!"Unsupported instruction form");
build.ldr(inst.regA64, mem(inst.regA64, offsetof(global_State, ttname)));
break;
}
case IrCmd::GET_TYPEOF:
{
regs.spill(build, index);
build.mov(x0, rState);
build.add(x1, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
build.ldr(x2, mem(rNativeContext, offsetof(NativeContext, luaT_objtypenamestr)));
build.blr(x2);
inst.regA64 = regs.takeReg(x0, index);
break;
}
// To handle unsupported instructions, add "case IrCmd::OP" and make sure to set error = true! // To handle unsupported instructions, add "case IrCmd::OP" and make sure to set error = true!
} }

View File

@ -15,7 +15,6 @@ namespace CodeGen
{ {
struct ModuleHelpers; struct ModuleHelpers;
struct NativeState;
struct AssemblyOptions; struct AssemblyOptions;
namespace A64 namespace A64
@ -23,7 +22,7 @@ namespace A64
struct IrLoweringA64 struct IrLoweringA64
{ {
IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function); IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next); void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
void finishBlock(); void finishBlock();
@ -63,7 +62,6 @@ struct IrLoweringA64
AssemblyBuilderA64& build; AssemblyBuilderA64& build;
ModuleHelpers& helpers; ModuleHelpers& helpers;
NativeState& data;
IrFunction& function; IrFunction& function;

View File

@ -22,10 +22,9 @@ namespace CodeGen
namespace X64 namespace X64
{ {
IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function) IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, IrFunction& function)
: build(build) : build(build)
, helpers(helpers) , helpers(helpers)
, data(data)
, function(function) , function(function)
, regs(build, function) , regs(build, function)
, valueTracker(function) , valueTracker(function)
@ -872,7 +871,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
tmp1.free(); tmp1.free();
callBarrierObject(regs, build, tmp2.release(), {}, vmRegOp(inst.b)); callBarrierObject(regs, build, tmp2.release(), {}, vmRegOp(inst.b), /* ratag */ -1);
break; break;
} }
case IrCmd::PREPARE_FORN: case IrCmd::PREPARE_FORN:
@ -983,7 +982,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
callStepGc(regs, build); callStepGc(regs, build);
break; break;
case IrCmd::BARRIER_OBJ: case IrCmd::BARRIER_OBJ:
callBarrierObject(regs, build, regOp(inst.a), inst.a, vmRegOp(inst.b)); callBarrierObject(regs, build, regOp(inst.a), inst.a, vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
break; break;
case IrCmd::BARRIER_TABLE_BACK: case IrCmd::BARRIER_TABLE_BACK:
callBarrierTableFast(regs, build, regOp(inst.a), inst.a); callBarrierTableFast(regs, build, regOp(inst.a), inst.a);
@ -993,7 +992,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
Label skip; Label skip;
ScopedRegX64 tmp{regs, SizeX64::qword}; ScopedRegX64 tmp{regs, SizeX64::qword};
checkObjectBarrierConditions(build, tmp.reg, regOp(inst.a), vmRegOp(inst.b), skip); checkObjectBarrierConditions(build, tmp.reg, regOp(inst.a), vmRegOp(inst.b), inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);
{ {
ScopedSpills spillGuard(regs); ScopedSpills spillGuard(regs);
@ -1350,6 +1349,30 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
inst.regX64 = regs.takeReg(xmm0, index); inst.regX64 = regs.takeReg(xmm0, index);
break; break;
} }
case IrCmd::GET_TYPE:
{
inst.regX64 = regs.allocReg(SizeX64::qword, index);
build.mov(inst.regX64, qword[rState + offsetof(lua_State, global)]);
if (inst.a.kind == IrOpKind::Inst)
build.mov(inst.regX64, qword[inst.regX64 + qwordReg(regOp(inst.a)) * sizeof(TString*) + offsetof(global_State, ttname)]);
else if (inst.a.kind == IrOpKind::Constant)
build.mov(inst.regX64, qword[inst.regX64 + tagOp(inst.a) * sizeof(TString*) + offsetof(global_State, ttname)]);
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
}
case IrCmd::GET_TYPEOF:
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.a)));
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaT_objtypenamestr)]);
inst.regX64 = regs.takeReg(rax, index);
break;
}
// Pseudo instructions // Pseudo instructions
case IrCmd::NOP: case IrCmd::NOP:
@ -1376,7 +1399,7 @@ void IrLoweringX64::finishFunction()
for (InterruptHandler& handler : interruptHandlers) for (InterruptHandler& handler : interruptHandlers)
{ {
build.setLabel(handler.self); build.setLabel(handler.self);
build.mov(rax, handler.pcpos + 1); build.mov(eax, handler.pcpos + 1);
build.lea(rbx, handler.next); build.lea(rbx, handler.next);
build.jmp(helpers.interrupt); build.jmp(helpers.interrupt);
} }

View File

@ -17,7 +17,6 @@ namespace CodeGen
{ {
struct ModuleHelpers; struct ModuleHelpers;
struct NativeState;
struct AssemblyOptions; struct AssemblyOptions;
namespace X64 namespace X64
@ -25,7 +24,7 @@ namespace X64
struct IrLoweringX64 struct IrLoweringX64
{ {
IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function); IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next); void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
void finishBlock(); void finishBlock();
@ -63,7 +62,6 @@ struct IrLoweringX64
AssemblyBuilderX64& build; AssemblyBuilderX64& build;
ModuleHelpers& helpers; ModuleHelpers& helpers;
NativeState& data;
IrFunction& function; IrFunction& function;

View File

@ -344,8 +344,10 @@ static BuiltinImplResult translateBuiltinType(IrBuilder& build, int nparams, int
if (nparams < 1 || nresults > 1) if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1}; return {BuiltinImplType::None, -1};
build.inst(IrCmd::FASTCALL, build.constUint(LBF_TYPE), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(1)); IrOp tag = build.inst(IrCmd::LOAD_TAG, build.vmReg(arg));
IrOp name = build.inst(IrCmd::GET_TYPE, tag);
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra), name);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING)); build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING));
return {BuiltinImplType::UsesFallback, 1}; return {BuiltinImplType::UsesFallback, 1};
@ -356,8 +358,9 @@ static BuiltinImplResult translateBuiltinTypeof(IrBuilder& build, int nparams, i
if (nparams < 1 || nresults > 1) if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1}; return {BuiltinImplType::None, -1};
build.inst(IrCmd::FASTCALL, build.constUint(LBF_TYPEOF), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(1)); IrOp name = build.inst(IrCmd::GET_TYPEOF, build.vmReg(arg));
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra), name);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING)); build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING));
return {BuiltinImplType::UsesFallback, 1}; return {BuiltinImplType::UsesFallback, 1};

View File

@ -825,7 +825,7 @@ void translateInstSetTableN(IrBuilder& build, const Instruction* pc, int pcpos)
IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra)); IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
build.inst(IrCmd::STORE_TVALUE, arrEl, tva); build.inst(IrCmd::STORE_TVALUE, arrEl, tva);
build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra)); build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra), build.undef());
IrOp next = build.blockAtInst(pcpos + 1); IrOp next = build.blockAtInst(pcpos + 1);
FallbackStreamScope scope(build, fallback, next); FallbackStreamScope scope(build, fallback, next);
@ -902,7 +902,7 @@ void translateInstSetTable(IrBuilder& build, const Instruction* pc, int pcpos)
IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra)); IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
build.inst(IrCmd::STORE_TVALUE, arrEl, tva); build.inst(IrCmd::STORE_TVALUE, arrEl, tva);
build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra)); build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra), build.undef());
IrOp next = build.blockAtInst(pcpos + 1); IrOp next = build.blockAtInst(pcpos + 1);
FallbackStreamScope scope(build, fallback, next); FallbackStreamScope scope(build, fallback, next);
@ -989,7 +989,7 @@ void translateInstSetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)
IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra)); IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
build.inst(IrCmd::STORE_NODE_VALUE_TV, addrSlotEl, tva); build.inst(IrCmd::STORE_NODE_VALUE_TV, addrSlotEl, tva);
build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra)); build.inst(IrCmd::BARRIER_TABLE_FORWARD, vb, build.vmReg(ra), build.undef());
IrOp next = build.blockAtInst(pcpos + 2); IrOp next = build.blockAtInst(pcpos + 2);
FallbackStreamScope scope(build, fallback, next); FallbackStreamScope scope(build, fallback, next);
@ -1036,7 +1036,7 @@ void translateInstSetGlobal(IrBuilder& build, const Instruction* pc, int pcpos)
IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra)); IrOp tva = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(ra));
build.inst(IrCmd::STORE_NODE_VALUE_TV, addrSlotEl, tva); build.inst(IrCmd::STORE_NODE_VALUE_TV, addrSlotEl, tva);
build.inst(IrCmd::BARRIER_TABLE_FORWARD, env, build.vmReg(ra)); build.inst(IrCmd::BARRIER_TABLE_FORWARD, env, build.vmReg(ra), build.undef());
IrOp next = build.blockAtInst(pcpos + 2); IrOp next = build.blockAtInst(pcpos + 2);
FallbackStreamScope scope(build, fallback, next); FallbackStreamScope scope(build, fallback, next);

View File

@ -159,6 +159,9 @@ IrValueKind getCmdValueKind(IrCmd cmd)
return IrValueKind::Int; return IrValueKind::Int;
case IrCmd::INVOKE_LIBM: case IrCmd::INVOKE_LIBM:
return IrValueKind::Double; return IrValueKind::Double;
case IrCmd::GET_TYPE:
case IrCmd::GET_TYPEOF:
return IrValueKind::Pointer;
} }
LUAU_UNREACHABLE(); LUAU_UNREACHABLE();

View File

@ -108,6 +108,7 @@ void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
case IrCmd::FALLBACK_SETTABLEKS: case IrCmd::FALLBACK_SETTABLEKS:
case IrCmd::FALLBACK_PREPVARARGS: case IrCmd::FALLBACK_PREPVARARGS:
case IrCmd::ADJUST_STACK_TO_TOP: case IrCmd::ADJUST_STACK_TO_TOP:
case IrCmd::GET_TYPEOF:
break; break;
// These instrucitons read VmReg only after optimizeMemoryOperandsX64 // These instrucitons read VmReg only after optimizeMemoryOperandsX64

View File

@ -732,6 +732,8 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
// If the written object is not collectable, barrier is not required // If the written object is not collectable, barrier is not required
if (!isGCO(tag)) if (!isGCO(tag))
kill(function, inst); kill(function, inst);
else
replace(function, inst.c, build.constTag(tag));
} }
} }
break; break;
@ -820,6 +822,8 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::BITCOUNTLZ_UINT: case IrCmd::BITCOUNTLZ_UINT:
case IrCmd::BITCOUNTRZ_UINT: case IrCmd::BITCOUNTRZ_UINT:
case IrCmd::INVOKE_LIBM: case IrCmd::INVOKE_LIBM:
case IrCmd::GET_TYPE:
case IrCmd::GET_TYPEOF:
break; break;
case IrCmd::JUMP_CMP_ANY: case IrCmd::JUMP_CMP_ANY:

View File

@ -33,7 +33,7 @@ public:
class const_iterator; class const_iterator;
class iterator; class iterator;
DenseHashTable(const Key& empty_key, size_t buckets = 0) explicit DenseHashTable(const Key& empty_key, size_t buckets = 0)
: data(nullptr) : data(nullptr)
, capacity(0) , capacity(0)
, count(0) , count(0)
@ -477,7 +477,7 @@ public:
typedef typename Impl::const_iterator const_iterator; typedef typename Impl::const_iterator const_iterator;
typedef typename Impl::iterator iterator; typedef typename Impl::iterator iterator;
DenseHashSet(const Key& empty_key, size_t buckets = 0) explicit DenseHashSet(const Key& empty_key, size_t buckets = 0)
: impl(empty_key, buckets) : impl(empty_key, buckets)
{ {
} }
@ -546,7 +546,7 @@ public:
typedef typename Impl::const_iterator const_iterator; typedef typename Impl::const_iterator const_iterator;
typedef typename Impl::iterator iterator; typedef typename Impl::iterator iterator;
DenseHashMap(const Key& empty_key, size_t buckets = 0) explicit DenseHashMap(const Key& empty_key, size_t buckets = 0)
: impl(empty_key, buckets) : impl(empty_key, buckets)
{ {
} }
@ -584,6 +584,22 @@ public:
return impl.find(key) != 0; return impl.find(key) != 0;
} }
std::pair<Value&, bool> try_insert(const Key& key, const Value& value)
{
impl.rehash_if_full(key);
size_t before = impl.size();
std::pair<Key, Value>* slot = impl.insert_unsafe(key);
// Value is fresh if container count has increased
bool fresh = impl.size() > before;
if (fresh)
slot->second = value;
return std::make_pair(std::ref(slot->second), fresh);
}
size_t size() const size_t size() const
{ {
return impl.size(); return impl.size();

View File

@ -161,7 +161,7 @@ clean:
rm -rf $(BUILD) rm -rf $(BUILD)
rm -rf $(EXECUTABLE_ALIASES) rm -rf $(EXECUTABLE_ALIASES)
coverage: $(TESTS_TARGET) coverage: $(TESTS_TARGET) $(COMPILE_CLI_TARGET)
$(TESTS_TARGET) $(TESTS_TARGET)
mv default.profraw tests.profraw mv default.profraw tests.profraw
$(TESTS_TARGET) --fflags=true $(TESTS_TARGET) --fflags=true
@ -170,7 +170,11 @@ coverage: $(TESTS_TARGET)
mv default.profraw codegen.profraw mv default.profraw codegen.profraw
$(TESTS_TARGET) -ts=Conformance --codegen --fflags=true $(TESTS_TARGET) -ts=Conformance --codegen --fflags=true
mv default.profraw codegen-flags.profraw mv default.profraw codegen-flags.profraw
llvm-profdata merge tests.profraw tests-flags.profraw codegen.profraw codegen-flags.profraw -o default.profdata $(COMPILE_CLI_TARGET) --codegennull --target=a64 tests/conformance
mv default.profraw codegen-a64.profraw
$(COMPILE_CLI_TARGET) --codegennull --target=x64 tests/conformance
mv default.profraw codegen-x64.profraw
llvm-profdata merge *.profraw -o default.profdata
rm *.profraw rm *.profraw
llvm-cov show -format=html -show-instantiations=false -show-line-counts=true -show-region-summary=false -ignore-filename-regex=\(tests\|extern\|CLI\)/.* -output-dir=coverage --instr-profile default.profdata build/coverage/luau-tests llvm-cov show -format=html -show-instantiations=false -show-line-counts=true -show-region-summary=false -ignore-filename-regex=\(tests\|extern\|CLI\)/.* -output-dir=coverage --instr-profile default.profdata build/coverage/luau-tests
llvm-cov report -ignore-filename-regex=\(tests\|extern\|CLI\)/.* -show-region-summary=false --instr-profile default.profdata build/coverage/luau-tests llvm-cov report -ignore-filename-regex=\(tests\|extern\|CLI\)/.* -show-region-summary=false --instr-profile default.profdata build/coverage/luau-tests

View File

@ -88,6 +88,7 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/CodeAllocator.cpp CodeGen/src/CodeAllocator.cpp
CodeGen/src/CodeBlockUnwind.cpp CodeGen/src/CodeBlockUnwind.cpp
CodeGen/src/CodeGen.cpp CodeGen/src/CodeGen.cpp
CodeGen/src/CodeGenAssembly.cpp
CodeGen/src/CodeGenUtils.cpp CodeGen/src/CodeGenUtils.cpp
CodeGen/src/CodeGenA64.cpp CodeGen/src/CodeGenA64.cpp
CodeGen/src/CodeGenX64.cpp CodeGen/src/CodeGenX64.cpp
@ -115,6 +116,7 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/BitUtils.h CodeGen/src/BitUtils.h
CodeGen/src/ByteUtils.h CodeGen/src/ByteUtils.h
CodeGen/src/CodeGenLower.h
CodeGen/src/CodeGenUtils.h CodeGen/src/CodeGenUtils.h
CodeGen/src/CodeGenA64.h CodeGen/src/CodeGenA64.h
CodeGen/src/CodeGenX64.h CodeGen/src/CodeGenX64.h

View File

@ -31,7 +31,7 @@ static uint64_t modelFunction(const char* source)
AstStatFunction* func = result.root->body.data[0]->as<AstStatFunction>(); AstStatFunction* func = result.root->body.data[0]->as<AstStatFunction>();
REQUIRE(func); REQUIRE(func);
return Luau::Compile::modelCost(func->func->body, func->func->args.data, func->func->args.size, {nullptr}); return Luau::Compile::modelCost(func->func->body, func->func->args.data, func->func->args.size, DenseHashMap<AstExprCall*, int>{nullptr});
} }
TEST_CASE("Expression") TEST_CASE("Expression")

View File

@ -1005,9 +1005,9 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SkipUselessBarriers")
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber)); build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber));
IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(1)); IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(1));
build.inst(IrCmd::BARRIER_TABLE_FORWARD, table, build.vmReg(0)); build.inst(IrCmd::BARRIER_TABLE_FORWARD, table, build.vmReg(0), build.undef());
IrOp something = build.inst(IrCmd::LOAD_POINTER, build.vmReg(2)); IrOp something = build.inst(IrCmd::LOAD_POINTER, build.vmReg(2));
build.inst(IrCmd::BARRIER_OBJ, something, build.vmReg(0)); build.inst(IrCmd::BARRIER_OBJ, something, build.vmReg(0), build.undef());
build.inst(IrCmd::RETURN, build.constUint(0)); build.inst(IrCmd::RETURN, build.constUint(0));
updateUseCounts(build.function); updateUseCounts(build.function);

View File

@ -409,9 +409,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_clone_reexports")
{ {
ScopedFastFlag flags[] = { ScopedFastFlag flags[] = {
{"LuauClonePublicInterfaceLess2", true}, {"LuauClonePublicInterfaceLess2", true},
{"LuauSubstitutionReentrant", true},
{"LuauClassTypeVarsInSubstitution", true},
{"LuauSubstitutionFixMissingFields", true},
}; };
fileResolver.source["Module/A"] = R"( fileResolver.source["Module/A"] = R"(
@ -447,9 +444,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_clone_types_of_reexported_values")
{ {
ScopedFastFlag flags[] = { ScopedFastFlag flags[] = {
{"LuauClonePublicInterfaceLess2", true}, {"LuauClonePublicInterfaceLess2", true},
{"LuauSubstitutionReentrant", true},
{"LuauClassTypeVarsInSubstitution", true},
{"LuauSubstitutionFixMissingFields", true},
}; };
fileResolver.source["Module/A"] = R"( fileResolver.source["Module/A"] = R"(

View File

@ -140,8 +140,8 @@ TEST_CASE_FIXTURE(FamilyFixture, "unsolvable_family")
local b = impossible(true) local b = impossible(true)
)"); )");
LUAU_REQUIRE_ERROR_COUNT(4, result); LUAU_REQUIRE_ERROR_COUNT(2, result);
for (size_t i = 0; i < 4; ++i) for (size_t i = 0; i < 2; ++i)
{ {
CHECK(toString(result.errors[i]) == "Type family instance Swap<a> is uninhabited"); CHECK(toString(result.errors[i]) == "Type family instance Swap<a> is uninhabited");
} }

View File

@ -8,7 +8,6 @@
using namespace Luau; using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution) LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauTypeMismatchInvarianceInError)
TEST_SUITE_BEGIN("TypeAliases"); TEST_SUITE_BEGIN("TypeAliases");
@ -199,15 +198,9 @@ TEST_CASE_FIXTURE(Fixture, "generic_aliases")
LUAU_REQUIRE_ERROR_COUNT(1, result); LUAU_REQUIRE_ERROR_COUNT(1, result);
const char* expectedError; const char* expectedError = "Type 'bad' could not be converted into 'T<number>'\n"
if (FFlag::LuauTypeMismatchInvarianceInError) "caused by:\n"
expectedError = "Type 'bad' could not be converted into 'T<number>'\n" " Property 'v' is not compatible. Type 'string' could not be converted into 'number' in an invariant context";
"caused by:\n"
" Property 'v' is not compatible. Type 'string' could not be converted into 'number' in an invariant context";
else
expectedError = "Type 'bad' could not be converted into 'T<number>'\n"
"caused by:\n"
" Property 'v' is not compatible. Type 'string' could not be converted into 'number'";
CHECK(result.errors[0].location == Location{{4, 31}, {4, 44}}); CHECK(result.errors[0].location == Location{{4, 31}, {4, 44}});
CHECK(toString(result.errors[0]) == expectedError); CHECK(toString(result.errors[0]) == expectedError);
@ -226,19 +219,11 @@ TEST_CASE_FIXTURE(Fixture, "dependent_generic_aliases")
LUAU_REQUIRE_ERROR_COUNT(1, result); LUAU_REQUIRE_ERROR_COUNT(1, result);
std::string expectedError; std::string expectedError = "Type 'bad' could not be converted into 'U<number>'\n"
if (FFlag::LuauTypeMismatchInvarianceInError) "caused by:\n"
expectedError = "Type 'bad' could not be converted into 'U<number>'\n" " Property 't' is not compatible. Type '{ v: string }' could not be converted into 'T<number>'\n"
"caused by:\n" "caused by:\n"
" Property 't' is not compatible. Type '{ v: string }' could not be converted into 'T<number>'\n" " Property 'v' is not compatible. Type 'string' could not be converted into 'number' in an invariant context";
"caused by:\n"
" Property 'v' is not compatible. Type 'string' could not be converted into 'number' in an invariant context";
else
expectedError = "Type 'bad' could not be converted into 'U<number>'\n"
"caused by:\n"
" Property 't' is not compatible. Type '{ v: string }' could not be converted into 'T<number>'\n"
"caused by:\n"
" Property 'v' is not compatible. Type 'string' could not be converted into 'number'";
CHECK(result.errors[0].location == Location{{4, 31}, {4, 52}}); CHECK(result.errors[0].location == Location{{4, 31}, {4, 52}});
CHECK(toString(result.errors[0]) == expectedError); CHECK(toString(result.errors[0]) == expectedError);

View File

@ -108,10 +108,7 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error2")
end end
)"); )");
if (FFlag::DebugLuauDeferredConstraintResolution) LUAU_REQUIRE_ERROR_COUNT(1, result);
LUAU_REQUIRE_ERROR_COUNT(2, result);
else
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ("*error-type*", toString(requireType("a"))); CHECK_EQ("*error-type*", toString(requireType("a")));
} }

View File

@ -12,8 +12,6 @@
using namespace Luau; using namespace Luau;
using std::nullopt; using std::nullopt;
LUAU_FASTFLAG(LuauTypeMismatchInvarianceInError);
TEST_SUITE_BEGIN("TypeInferClasses"); TEST_SUITE_BEGIN("TypeInferClasses");
TEST_CASE_FIXTURE(ClassFixture, "call_method_of_a_class") TEST_CASE_FIXTURE(ClassFixture, "call_method_of_a_class")
@ -462,14 +460,9 @@ local b: B = a
)"); )");
LUAU_REQUIRE_ERRORS(result); LUAU_REQUIRE_ERRORS(result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by: caused by:
Property 'x' is not compatible. Type 'ChildClass' could not be converted into 'BaseClass' in an invariant context)"); Property 'x' is not compatible. Type 'ChildClass' could not be converted into 'BaseClass' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by:
Property 'x' is not compatible. Type 'ChildClass' could not be converted into 'BaseClass')");
} }
TEST_CASE_FIXTURE(ClassFixture, "callable_classes") TEST_CASE_FIXTURE(ClassFixture, "callable_classes")

View File

@ -1312,10 +1312,6 @@ f(function(x) return x * 2 end)
TEST_CASE_FIXTURE(Fixture, "variadic_any_is_compatible_with_a_generic_TypePack") TEST_CASE_FIXTURE(Fixture, "variadic_any_is_compatible_with_a_generic_TypePack")
{ {
ScopedFastFlag sff[] = {
{"LuauVariadicAnyCanBeGeneric", true}
};
CheckResult result = check(R"( CheckResult result = check(R"(
--!strict --!strict
local function f(...) return ... end local function f(...) return ... end
@ -1328,8 +1324,6 @@ TEST_CASE_FIXTURE(Fixture, "variadic_any_is_compatible_with_a_generic_TypePack")
// https://github.com/Roblox/luau/issues/767 // https://github.com/Roblox/luau/issues/767
TEST_CASE_FIXTURE(BuiltinsFixture, "variadic_any_is_compatible_with_a_generic_TypePack_2") TEST_CASE_FIXTURE(BuiltinsFixture, "variadic_any_is_compatible_with_a_generic_TypePack_2")
{ {
ScopedFastFlag sff{"LuauVariadicAnyCanBeGeneric", true};
CheckResult result = check(R"( CheckResult result = check(R"(
local function somethingThatsAny(...: any) local function somethingThatsAny(...: any)
print(...) print(...)
@ -1920,8 +1914,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dont_assert_when_the_tarjan_limit_is_exceede
ScopedFastFlag sff[] = { ScopedFastFlag sff[] = {
{"DebugLuauDeferredConstraintResolution", true}, {"DebugLuauDeferredConstraintResolution", true},
{"LuauClonePublicInterfaceLess2", true}, {"LuauClonePublicInterfaceLess2", true},
{"LuauSubstitutionReentrant", true},
{"LuauSubstitutionFixMissingFields", true},
{"LuauCloneSkipNonInternalVisit", true}, {"LuauCloneSkipNonInternalVisit", true},
}; };
@ -2089,4 +2081,19 @@ TEST_CASE_FIXTURE(Fixture, "attempt_to_call_an_intersection_of_tables")
CHECK_EQ(toString(result.errors[0]), "Cannot call non-function {| x: number |}"); CHECK_EQ(toString(result.errors[0]), "Cannot call non-function {| x: number |}");
} }
TEST_CASE_FIXTURE(BuiltinsFixture, "attempt_to_call_an_intersection_of_tables_with_call_metamethod")
{
CheckResult result = check(R"(
type Callable = typeof(setmetatable({}, {
__call = function(self, ...) return ... end
}))
local function f(t: Callable & { x: number })
t()
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END(); TEST_SUITE_END();

View File

@ -10,7 +10,6 @@
#include "doctest.h" #include "doctest.h"
LUAU_FASTFLAG(LuauInstantiateInSubtyping) LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauTypeMismatchInvarianceInError)
using namespace Luau; using namespace Luau;
@ -725,24 +724,12 @@ y.a.c = y
)"); )");
LUAU_REQUIRE_ERRORS(result); LUAU_REQUIRE_ERRORS(result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]),
{ R"(Type 'y' could not be converted into 'T<string>'
CHECK_EQ(toString(result.errors[0]),
R"(Type 'y' could not be converted into 'T<string>'
caused by: caused by:
Property 'a' is not compatible. Type '{ c: T<string>?, d: number }' could not be converted into 'U<string>' Property 'a' is not compatible. Type '{ c: T<string>?, d: number }' could not be converted into 'U<string>'
caused by: caused by:
Property 'd' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)"); Property 'd' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)");
}
else
{
CHECK_EQ(toString(result.errors[0]),
R"(Type 'y' could not be converted into 'T<string>'
caused by:
Property 'a' is not compatible. Type '{ c: T<string>?, d: number }' could not be converted into 'U<string>'
caused by:
Property 'd' is not compatible. Type 'number' could not be converted into 'string')");
}
} }
TEST_CASE_FIXTURE(Fixture, "generic_type_pack_unification1") TEST_CASE_FIXTURE(Fixture, "generic_type_pack_unification1")

View File

@ -539,10 +539,6 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_never_properties") TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_never_properties")
{ {
ScopedFastFlag sffs[]{
{"LuauUninhabitedSubAnything2", true},
};
CheckResult result = check(R"( CheckResult result = check(R"(
local x : { p : number?, q : never } & { p : never, q : string? } -- OK local x : { p : number?, q : never } & { p : never, q : string? } -- OK
local y : { p : never, q : never } = x -- OK local y : { p : never, q : never } = x -- OK

View File

@ -11,7 +11,6 @@
#include "doctest.h" #include "doctest.h"
LUAU_FASTFLAG(LuauInstantiateInSubtyping) LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauTypeMismatchInvarianceInError)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution) LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
using namespace Luau; using namespace Luau;
@ -410,14 +409,9 @@ local b: B.T = a
CheckResult result = frontend.check("game/C"); CheckResult result = frontend.check("game/C");
LUAU_REQUIRE_ERROR_COUNT(1, result); LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'T' from 'game/A' could not be converted into 'T' from 'game/B'
CHECK_EQ(toString(result.errors[0]), R"(Type 'T' from 'game/A' could not be converted into 'T' from 'game/B'
caused by: caused by:
Property 'x' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)"); Property 'x' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'T' from 'game/A' could not be converted into 'T' from 'game/B'
caused by:
Property 'x' is not compatible. Type 'number' could not be converted into 'string')");
} }
TEST_CASE_FIXTURE(BuiltinsFixture, "module_type_conflict_instantiated") TEST_CASE_FIXTURE(BuiltinsFixture, "module_type_conflict_instantiated")
@ -449,14 +443,9 @@ local b: B.T = a
CheckResult result = frontend.check("game/D"); CheckResult result = frontend.check("game/D");
LUAU_REQUIRE_ERROR_COUNT(1, result); LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'T' from 'game/B' could not be converted into 'T' from 'game/C'
CHECK_EQ(toString(result.errors[0]), R"(Type 'T' from 'game/B' could not be converted into 'T' from 'game/C'
caused by: caused by:
Property 'x' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)"); Property 'x' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'T' from 'game/B' could not be converted into 'T' from 'game/C'
caused by:
Property 'x' is not compatible. Type 'number' could not be converted into 'string')");
} }
TEST_CASE_FIXTURE(BuiltinsFixture, "constrained_anyification_clone_immutable_types") TEST_CASE_FIXTURE(BuiltinsFixture, "constrained_anyification_clone_immutable_types")

View File

@ -26,17 +26,8 @@ TEST_CASE_FIXTURE(Fixture, "dont_suggest_using_colon_rather_than_dot_if_not_defi
someTable.Function1() -- Argument count mismatch someTable.Function1() -- Argument count mismatch
)"); )");
if (FFlag::DebugLuauDeferredConstraintResolution) LUAU_REQUIRE_ERROR_COUNT(1, result);
{ REQUIRE(get<CountMismatch>(result.errors[0]));
LUAU_REQUIRE_ERROR_COUNT(2, result);
CHECK(toString(result.errors[0]) == "No overload for function accepts 0 arguments.");
CHECK(toString(result.errors[1]) == "Available overloads: <a>(a) -> ()");
}
else
{
LUAU_REQUIRE_ERROR_COUNT(1, result);
REQUIRE(get<CountMismatch>(result.errors[0]));
}
} }
TEST_CASE_FIXTURE(Fixture, "dont_suggest_using_colon_rather_than_dot_if_it_wont_help_2") TEST_CASE_FIXTURE(Fixture, "dont_suggest_using_colon_rather_than_dot_if_it_wont_help_2")
@ -50,17 +41,8 @@ TEST_CASE_FIXTURE(Fixture, "dont_suggest_using_colon_rather_than_dot_if_it_wont_
someTable.Function2() -- Argument count mismatch someTable.Function2() -- Argument count mismatch
)"); )");
if (FFlag::DebugLuauDeferredConstraintResolution) LUAU_REQUIRE_ERROR_COUNT(1, result);
{ REQUIRE(get<CountMismatch>(result.errors[0]));
LUAU_REQUIRE_ERROR_COUNT(2, result);
CHECK(toString(result.errors[0]) == "No overload for function accepts 0 arguments.");
CHECK(toString(result.errors[1]) == "Available overloads: <a, b>(a, b) -> ()");
}
else
{
LUAU_REQUIRE_ERROR_COUNT(1, result);
REQUIRE(get<CountMismatch>(result.errors[0]));
}
} }
TEST_CASE_FIXTURE(Fixture, "dont_suggest_using_colon_rather_than_dot_if_another_overload_works") TEST_CASE_FIXTURE(Fixture, "dont_suggest_using_colon_rather_than_dot_if_another_overload_works")

View File

@ -1238,4 +1238,21 @@ TEST_CASE_FIXTURE(Fixture, "add_type_family_works")
CHECK(toString(result.errors[0]) == "Type family instance Add<string, string> is uninhabited"); CHECK(toString(result.errors[0]) == "Type family instance Add<string, string> is uninhabited");
} }
TEST_CASE_FIXTURE(BuiltinsFixture, "normalize_strings_comparison")
{
CheckResult result = check(R"(
local function sortKeysForPrinting(a: any, b)
local typeofA = type(a)
local typeofB = type(b)
-- strings and numbers are sorted numerically/alphabetically
if typeofA == typeofB and (typeofA == "number" or typeofA == "string") then
return a < b
end
-- sort the rest by type name
return typeofA < typeofB
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END(); TEST_SUITE_END();

View File

@ -9,8 +9,6 @@
using namespace Luau; using namespace Luau;
LUAU_FASTFLAG(LuauTypeMismatchInvarianceInError)
TEST_SUITE_BEGIN("ProvisionalTests"); TEST_SUITE_BEGIN("ProvisionalTests");
// These tests check for behavior that differs from the final behavior we'd // These tests check for behavior that differs from the final behavior we'd
@ -793,20 +791,10 @@ TEST_CASE_FIXTURE(Fixture, "assign_table_with_refined_property_with_a_similar_ty
LUAU_REQUIRE_ERROR_COUNT(1, result); LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(R"(Type '{| x: number? |}' could not be converted into '{| x: number |}'
{
CHECK_EQ(R"(Type '{| x: number? |}' could not be converted into '{| x: number |}'
caused by: caused by:
Property 'x' is not compatible. Type 'number?' could not be converted into 'number' in an invariant context)", Property 'x' is not compatible. Type 'number?' could not be converted into 'number' in an invariant context)",
toString(result.errors[0])); toString(result.errors[0]));
}
else
{
CHECK_EQ(R"(Type '{| x: number? |}' could not be converted into '{| x: number |}'
caused by:
Property 'x' is not compatible. Type 'number?' could not be converted into 'number')",
toString(result.errors[0]));
}
} }
TEST_CASE_FIXTURE(BuiltinsFixture, "table_insert_with_a_singleton_argument") TEST_CASE_FIXTURE(BuiltinsFixture, "table_insert_with_a_singleton_argument")
@ -856,10 +844,6 @@ TEST_CASE_FIXTURE(Fixture, "lookup_prop_of_intersection_containing_unions_of_tab
TEST_CASE_FIXTURE(Fixture, "expected_type_should_be_a_helpful_deduction_guide_for_function_calls") TEST_CASE_FIXTURE(Fixture, "expected_type_should_be_a_helpful_deduction_guide_for_function_calls")
{ {
ScopedFastFlag sffs[]{
{"LuauTypeMismatchInvarianceInError", true},
};
CheckResult result = check(R"( CheckResult result = check(R"(
type Ref<T> = { val: T } type Ref<T> = { val: T }
@ -947,10 +931,6 @@ TEST_CASE_FIXTURE(Fixture, "unify_more_complex_unions_that_include_nil")
TEST_CASE_FIXTURE(Fixture, "optional_class_instances_are_invariant") TEST_CASE_FIXTURE(Fixture, "optional_class_instances_are_invariant")
{ {
ScopedFastFlag sff[] = {
{"LuauTypeMismatchInvarianceInError", true}
};
createSomeClasses(&frontend); createSomeClasses(&frontend);
CheckResult result = check(R"( CheckResult result = check(R"(

View File

@ -17,7 +17,6 @@ using namespace Luau;
LUAU_FASTFLAG(LuauLowerBoundsCalculation); LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution); LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAG(LuauInstantiateInSubtyping) LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauTypeMismatchInvarianceInError)
TEST_SUITE_BEGIN("TableTests"); TEST_SUITE_BEGIN("TableTests");
@ -2077,14 +2076,9 @@ local b: B = a
)"); )");
LUAU_REQUIRE_ERRORS(result); LUAU_REQUIRE_ERRORS(result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by: caused by:
Property 'y' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)"); Property 'y' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by:
Property 'y' is not compatible. Type 'number' could not be converted into 'string')");
} }
TEST_CASE_FIXTURE(Fixture, "error_detailed_prop_nested") TEST_CASE_FIXTURE(Fixture, "error_detailed_prop_nested")
@ -2101,18 +2095,11 @@ local b: B = a
)"); )");
LUAU_REQUIRE_ERRORS(result); LUAU_REQUIRE_ERRORS(result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by: caused by:
Property 'b' is not compatible. Type 'AS' could not be converted into 'BS' Property 'b' is not compatible. Type 'AS' could not be converted into 'BS'
caused by: caused by:
Property 'y' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)"); Property 'y' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by:
Property 'b' is not compatible. Type 'AS' could not be converted into 'BS'
caused by:
Property 'y' is not compatible. Type 'number' could not be converted into 'string')");
} }
TEST_CASE_FIXTURE(BuiltinsFixture, "error_detailed_metatable_prop") TEST_CASE_FIXTURE(BuiltinsFixture, "error_detailed_metatable_prop")
@ -2128,18 +2115,11 @@ local c2: typeof(a2) = b2
)"); )");
LUAU_REQUIRE_ERROR_COUNT(2, result); LUAU_REQUIRE_ERROR_COUNT(2, result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'b1' could not be converted into 'a1'
CHECK_EQ(toString(result.errors[0]), R"(Type 'b1' could not be converted into 'a1'
caused by: caused by:
Type '{ x: number, y: string }' could not be converted into '{ x: number, y: number }' Type '{ x: number, y: string }' could not be converted into '{ x: number, y: number }'
caused by: caused by:
Property 'y' is not compatible. Type 'string' could not be converted into 'number' in an invariant context)"); Property 'y' is not compatible. Type 'string' could not be converted into 'number' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'b1' could not be converted into 'a1'
caused by:
Type '{ x: number, y: string }' could not be converted into '{ x: number, y: number }'
caused by:
Property 'y' is not compatible. Type 'string' could not be converted into 'number')");
if (FFlag::LuauInstantiateInSubtyping) if (FFlag::LuauInstantiateInSubtyping)
{ {
@ -2170,14 +2150,9 @@ TEST_CASE_FIXTURE(Fixture, "error_detailed_indexer_key")
)"); )");
LUAU_REQUIRE_ERRORS(result); LUAU_REQUIRE_ERRORS(result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by: caused by:
Property '[indexer key]' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)"); Property '[indexer key]' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by:
Property '[indexer key]' is not compatible. Type 'number' could not be converted into 'string')");
} }
TEST_CASE_FIXTURE(Fixture, "error_detailed_indexer_value") TEST_CASE_FIXTURE(Fixture, "error_detailed_indexer_value")
@ -2191,14 +2166,9 @@ TEST_CASE_FIXTURE(Fixture, "error_detailed_indexer_value")
)"); )");
LUAU_REQUIRE_ERRORS(result); LUAU_REQUIRE_ERRORS(result);
if (FFlag::LuauTypeMismatchInvarianceInError) CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by: caused by:
Property '[indexer value]' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)"); Property '[indexer value]' is not compatible. Type 'number' could not be converted into 'string' in an invariant context)");
else
CHECK_EQ(toString(result.errors[0]), R"(Type 'A' could not be converted into 'B'
caused by:
Property '[indexer value]' is not compatible. Type 'number' could not be converted into 'string')");
} }
TEST_CASE_FIXTURE(Fixture, "explicitly_typed_table") TEST_CASE_FIXTURE(Fixture, "explicitly_typed_table")
@ -2871,10 +2841,20 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "table_call_metamethod_must_be_callable")
)"); )");
LUAU_REQUIRE_ERROR_COUNT(1, result); LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(result.errors[0] == TypeError{
Location{{5, 20}, {5, 21}}, if (FFlag::DebugLuauDeferredConstraintResolution)
CannotCallNonFunction{builtinTypes->numberType}, {
}); CHECK("Cannot call non-function { @metatable { __call: number }, { } }" == toString(result.errors[0]));
}
else
{
TypeError e{
Location{{5, 20}, {5, 21}},
CannotCallNonFunction{builtinTypes->numberType},
};
CHECK(result.errors[0] == e);
}
} }
TEST_CASE_FIXTURE(BuiltinsFixture, "table_call_metamethod_generic") TEST_CASE_FIXTURE(BuiltinsFixture, "table_call_metamethod_generic")

View File

@ -1291,4 +1291,45 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "convoluted_case_where_two_TypeVars_were_boun
// If this code does not crash, we are in good shape. // If this code does not crash, we are in good shape.
} }
/*
* Under DCR we had an issue where constraint resolution resulted in the
* following:
*
* *blocked-55* ~ hasProp {- name: *blocked-55* -}, "name"
*
* This is a perfectly reasonable constraint, but one that doesn't actually
* constrain anything. When we encounter a constraint like this, we need to
* replace the result type by a free type that is scoped to the enclosing table.
*
* Conceptually, it's simplest to think of this constraint as one that is
* tautological. It does not actually contribute any new information.
*/
TEST_CASE_FIXTURE(Fixture, "handle_self_referential_HasProp_constraints")
{
CheckResult result = check(R"(
local function calculateTopBarHeight(props)
end
local function isTopPage(props)
local topMostOpaquePage
if props.avatarRoute then
topMostOpaquePage = props.avatarRoute.opaque.name
else
topMostOpaquePage = props.opaquePage
end
end
function TopBarContainer:updateTopBarHeight(prevProps, prevState)
calculateTopBarHeight(self.props)
isTopPage(self.props)
local topMostOpaquePage
if self.props.avatarRoute then
topMostOpaquePage = self.props.avatarRoute.opaque.name
-- ^--------------------------------^
else
topMostOpaquePage = self.props.opaquePage
end
end
)");
}
TEST_SUITE_END(); TEST_SUITE_END();

View File

@ -161,10 +161,6 @@ TEST_CASE_FIXTURE(TryUnifyFixture, "uninhabited_intersection_sub_anything")
TEST_CASE_FIXTURE(TryUnifyFixture, "uninhabited_table_sub_never") TEST_CASE_FIXTURE(TryUnifyFixture, "uninhabited_table_sub_never")
{ {
ScopedFastFlag sffs[]{
{"LuauUninhabitedSubAnything2", true},
};
CheckResult result = check(R"( CheckResult result = check(R"(
function f(arg : { prop : string & number }) : never function f(arg : { prop : string & number }) : never
return arg return arg
@ -175,10 +171,6 @@ TEST_CASE_FIXTURE(TryUnifyFixture, "uninhabited_table_sub_never")
TEST_CASE_FIXTURE(TryUnifyFixture, "uninhabited_table_sub_anything") TEST_CASE_FIXTURE(TryUnifyFixture, "uninhabited_table_sub_anything")
{ {
ScopedFastFlag sffs[]{
{"LuauUninhabitedSubAnything2", true},
};
CheckResult result = check(R"( CheckResult result = check(R"(
function f(arg : { prop : string & number }) : boolean function f(arg : { prop : string & number }) : boolean
return arg return arg

View File

@ -5,8 +5,6 @@ BuiltinTests.assert_removes_falsy_types2
BuiltinTests.assert_removes_falsy_types_even_from_type_pack_tail_but_only_for_the_first_type BuiltinTests.assert_removes_falsy_types_even_from_type_pack_tail_but_only_for_the_first_type
BuiltinTests.assert_returns_false_and_string_iff_it_knows_the_first_argument_cannot_be_truthy BuiltinTests.assert_returns_false_and_string_iff_it_knows_the_first_argument_cannot_be_truthy
BuiltinTests.bad_select_should_not_crash BuiltinTests.bad_select_should_not_crash
BuiltinTests.gmatch_definition
BuiltinTests.math_max_checks_for_numbers
BuiltinTests.select_slightly_out_of_range BuiltinTests.select_slightly_out_of_range
BuiltinTests.select_way_out_of_range BuiltinTests.select_way_out_of_range
BuiltinTests.set_metatable_needs_arguments BuiltinTests.set_metatable_needs_arguments
@ -16,6 +14,10 @@ BuiltinTests.string_format_correctly_ordered_types
BuiltinTests.string_format_report_all_type_errors_at_correct_positions BuiltinTests.string_format_report_all_type_errors_at_correct_positions
BuiltinTests.string_format_tostring_specifier_type_constraint BuiltinTests.string_format_tostring_specifier_type_constraint
BuiltinTests.string_format_use_correct_argument2 BuiltinTests.string_format_use_correct_argument2
BuiltinTests.table_pack
BuiltinTests.table_pack_reduce
BuiltinTests.table_pack_variadic
DefinitionTests.class_definition_indexer
DefinitionTests.class_definition_overload_metamethods DefinitionTests.class_definition_overload_metamethods
DefinitionTests.class_definition_string_props DefinitionTests.class_definition_string_props
GenericsTests.better_mismatch_error_messages GenericsTests.better_mismatch_error_messages
@ -71,6 +73,7 @@ TableTests.expected_indexer_value_type_extra_2
TableTests.explicitly_typed_table TableTests.explicitly_typed_table
TableTests.explicitly_typed_table_with_indexer TableTests.explicitly_typed_table_with_indexer
TableTests.fuzz_table_unify_instantiated_table TableTests.fuzz_table_unify_instantiated_table
TableTests.fuzz_table_unify_instantiated_table_with_prop_realloc
TableTests.generic_table_instantiation_potential_regression TableTests.generic_table_instantiation_potential_regression
TableTests.give_up_after_one_metatable_index_look_up TableTests.give_up_after_one_metatable_index_look_up
TableTests.indexer_on_sealed_table_must_unify_with_free_table TableTests.indexer_on_sealed_table_must_unify_with_free_table
@ -93,6 +96,7 @@ TableTests.shared_selfs
TableTests.shared_selfs_from_free_param TableTests.shared_selfs_from_free_param
TableTests.shared_selfs_through_metatables TableTests.shared_selfs_through_metatables
TableTests.table_call_metamethod_basic TableTests.table_call_metamethod_basic
TableTests.table_call_metamethod_generic
TableTests.table_simple_call TableTests.table_simple_call
TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors
TableTests.used_colon_instead_of_dot TableTests.used_colon_instead_of_dot
@ -127,6 +131,7 @@ TypeInfer.tc_after_error_recovery_no_replacement_name_in_error
TypeInfer.type_infer_recursion_limit_no_ice TypeInfer.type_infer_recursion_limit_no_ice
TypeInfer.type_infer_recursion_limit_normalizer TypeInfer.type_infer_recursion_limit_normalizer
TypeInferAnyError.for_in_loop_iterator_is_any2 TypeInferAnyError.for_in_loop_iterator_is_any2
TypeInferClasses.callable_classes
TypeInferClasses.class_type_mismatch_with_name_conflict TypeInferClasses.class_type_mismatch_with_name_conflict
TypeInferClasses.index_instance_property TypeInferClasses.index_instance_property
TypeInferFunctions.cannot_hoist_interior_defns_into_signature TypeInferFunctions.cannot_hoist_interior_defns_into_signature
@ -161,8 +166,6 @@ TypeInferModules.module_type_conflict
TypeInferModules.module_type_conflict_instantiated TypeInferModules.module_type_conflict_instantiated
TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory
TypeInferOOP.methods_are_topologically_sorted TypeInferOOP.methods_are_topologically_sorted
TypeInferOperators.CallAndOrOfFunctions
TypeInferOperators.CallOrOfFunctions
TypeInferOperators.cli_38355_recursive_union TypeInferOperators.cli_38355_recursive_union
TypeInferOperators.compound_assign_mismatch_metatable TypeInferOperators.compound_assign_mismatch_metatable
TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops
@ -179,8 +182,6 @@ TypePackTests.detect_cyclic_typepacks2
TypePackTests.pack_tail_unification_check TypePackTests.pack_tail_unification_check
TypePackTests.type_alias_backwards_compatible TypePackTests.type_alias_backwards_compatible
TypePackTests.type_alias_default_type_errors TypePackTests.type_alias_default_type_errors
TypePackTests.unify_variadic_tails_in_arguments
TypePackTests.variadic_packs
TypeSingletons.function_call_with_singletons TypeSingletons.function_call_with_singletons
TypeSingletons.function_call_with_singletons_mismatch TypeSingletons.function_call_with_singletons_mismatch
TypeSingletons.no_widening_from_callsites TypeSingletons.no_widening_from_callsites
@ -192,5 +193,4 @@ TypeSingletons.widening_happens_almost_everywhere
UnionTypes.dont_allow_cyclic_unions_to_be_inferred UnionTypes.dont_allow_cyclic_unions_to_be_inferred
UnionTypes.generic_function_with_optional_arg UnionTypes.generic_function_with_optional_arg
UnionTypes.index_on_a_union_type_with_missing_property UnionTypes.index_on_a_union_type_with_missing_property
UnionTypes.optional_union_follow
UnionTypes.table_union_write_indirect UnionTypes.table_union_write_indirect