Sync to upstream/release/576 (#928)

* `ClassType` can now have an indexer defined on it. This allows custom
types to be used in `t[x]` expressions (see the sketch after this list).
* Fixed the search for the closest executable breakpoint line. Previously,
breakpoints could be skipped in `else` blocks at the end of a function.
* Fixed how unification is performed for two optional types `a? <: b?`;
previously it might have unified either `a` or `b` with `nil`. Note that
this fix is not enabled by default yet (see the list in
`ExperimentalFlags.h`).
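
A minimal sketch of what the embedder (C++) side of the class indexer could
look like, using the new `ClassType` constructor that accepts an indexer; the
class name, arena, and builtin-type handles below are illustrative assumptions,
not part of this commit:

```cpp
// Assumes an existing Luau::TypeArena `arena` and Luau::NotNull<Luau::BuiltinTypes> `builtins`,
// and that FFlag::LuauTypecheckClassTypeIndexers is enabled (the new constructor asserts it).
// Declares a class whose instances can be indexed with a string key to produce a number,
// so Luau code like `inst["Health"]` typechecks as `number`.
Luau::ClassType instanceClass{
    /* name */ "Instance",
    /* props */ {},
    /* parent */ std::nullopt,
    /* metatable */ std::nullopt,
    /* tags */ {},
    /* userData */ nullptr,
    /* definitionModuleName */ "@example",
    /* indexer */ Luau::TableIndexer{builtins->stringType, builtins->numberType}};
Luau::TypeId instanceTy = arena.addType(std::move(instanceClass));
```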

In the new type solver, a concept of 'Type Families' has been introduced.
Type families can be thought of as type aliases with custom type
inference/reduction logic attached to them.
For example, we can have an `Add<T, U>` type family that resolves to the
type that results from adding two values together.
This will help type inference figure out what `T` and `U` might be when
explicit type annotations are not provided.
This update does not define any type families yet, but they will be added
in the near future.
It is also possible for Luau embedders to define their own type families
in the global/environment scope.
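
As a rough illustration of the mechanism, here is a hedged sketch of what an
embedder-defined family could look like, written against the `TypeFamily` and
`TypeFamilyReductionResult` declarations added in
`Analysis/include/Luau/TypeFamily.h` further down in this diff. The names and
the reduction logic are illustrative only and are not part of this commit:

```cpp
// Reducer for a hypothetical Add<T, U> family: defer while either argument is
// still a free type, reduce to `number` once both arguments are primitive
// types, and otherwise report the instance as uninhabited.
Luau::TypeFamilyReductionResult<Luau::TypeId> addFamilyFn(std::vector<Luau::TypeId> tys,
    std::vector<Luau::TypePackId> tps, Luau::NotNull<Luau::TypeArena> arena,
    Luau::NotNull<Luau::BuiltinTypes> builtins, Luau::NotNull<const Luau::TxnLog> log)
{
    Luau::TypeId lhs = log->follow(tys.at(0));
    Luau::TypeId rhs = log->follow(tys.at(1));

    // Not enough information yet: block reduction on the unresolved arguments.
    std::vector<Luau::TypeId> blocked;
    if (log->is<Luau::FreeType>(lhs))
        blocked.push_back(lhs);
    if (log->is<Luau::FreeType>(rhs))
        blocked.push_back(rhs);
    if (!blocked.empty())
        return {std::nullopt, /* uninhabited */ false, std::move(blocked), {}};

    // A real reducer would check for `number` specifically and consult metatables.
    if (log->is<Luau::PrimitiveType>(lhs) && log->is<Luau::PrimitiveType>(rhs))
        return {builtins->numberType, /* uninhabited */ false, {}, {}};

    return {std::nullopt, /* uninhabited */ true, {}, {}};
}

Luau::TypeFamily addFamily{"Add", addFamilyFn};
```

A `TypeFamilyInstanceType` carrying this family and its two type arguments is
then reduced by the solver via the new `ReduceConstraint`.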

Other changes include:
* Fixed the scope used to determine which generic types should be included
in a function's generic type list
* Fixed a crash after cyclic bound types were created during unification

And in native code generation (JIT):
* Use of the arm64 target on M1 now requires macOS 13
* Entry into native code has been optimized. This is especially
important for coroutine call/pcall performance, as they involve going
through a C call frame
* `LOP_LOADK(X)` translation into IR has been improved to enable type
tag/constant propagation
* arm64 can use integer immediate values to synthesize floating-point
values
* The x64 assembler removes duplicate 64-bit numbers from the data section
to save space
* Linux `perf` can now be used to profile native Luau code (when running
with the `--codegen-perf` CLI argument)
vegorov-rbx 2023-05-12 20:50:47 +03:00 committed by GitHub
parent 8d8c7974f5
commit 97965c7c0a
62 changed files with 2869 additions and 276 deletions


@ -198,9 +198,26 @@ struct UnpackConstraint
TypePackId sourcePack;
};
using ConstraintV = Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, UnaryConstraint,
BinaryConstraint, IterableConstraint, NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, PrimitiveTypeConstraint,
HasPropConstraint, SetPropConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint>;
// ty ~ reduce ty
//
// Try to reduce ty, if it is a TypeFamilyInstanceType. Otherwise, do nothing.
struct ReduceConstraint
{
TypeId ty;
};
// tp ~ reduce tp
//
// Analogous to ReduceConstraint, but for type packs.
struct ReducePackConstraint
{
TypePackId tp;
};
using ConstraintV =
Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, UnaryConstraint, BinaryConstraint,
IterableConstraint, NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, PrimitiveTypeConstraint, HasPropConstraint,
SetPropConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, ReduceConstraint, ReducePackConstraint>;
struct Constraint
{


@ -121,6 +121,8 @@ struct ConstraintSolver
bool tryDispatch(const SetIndexerConstraint& c, NotNull<const Constraint> constraint, bool force);
bool tryDispatch(const SingletonOrTopTypeConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const UnpackConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const ReduceConstraint& c, NotNull<const Constraint> constraint, bool force);
bool tryDispatch(const ReducePackConstraint& c, NotNull<const Constraint> constraint, bool force);
// for a, ... in some_table do
// also handles __iter metamethod


@ -329,12 +329,27 @@ struct DynamicPropertyLookupOnClassesUnsafe
bool operator==(const DynamicPropertyLookupOnClassesUnsafe& rhs) const;
};
using TypeErrorData = Variant<TypeMismatch, UnknownSymbol, UnknownProperty, NotATable, CannotExtendTable, OnlyTablesCanHaveMethods,
DuplicateTypeDefinition, CountMismatch, FunctionDoesNotTakeSelf, FunctionRequiresSelf, OccursCheckFailed, UnknownRequire,
IncorrectGenericParameterCount, SyntaxError, CodeTooComplex, UnificationTooComplex, UnknownPropButFoundLikeProp, GenericError, InternalError,
CannotCallNonFunction, ExtraInformation, DeprecatedApiUsed, ModuleHasCyclicDependency, IllegalRequire, FunctionExitsWithoutReturning,
DuplicateGenericParameter, CannotInferBinaryOperation, MissingProperties, SwappedGenericTypeParameter, OptionalValueAccess, MissingUnionProperty,
TypesAreUnrelated, NormalizationTooComplex, TypePackMismatch, DynamicPropertyLookupOnClassesUnsafe>;
struct UninhabitedTypeFamily
{
TypeId ty;
bool operator==(const UninhabitedTypeFamily& rhs) const;
};
struct UninhabitedTypePackFamily
{
TypePackId tp;
bool operator==(const UninhabitedTypePackFamily& rhs) const;
};
using TypeErrorData =
Variant<TypeMismatch, UnknownSymbol, UnknownProperty, NotATable, CannotExtendTable, OnlyTablesCanHaveMethods, DuplicateTypeDefinition,
CountMismatch, FunctionDoesNotTakeSelf, FunctionRequiresSelf, OccursCheckFailed, UnknownRequire, IncorrectGenericParameterCount, SyntaxError,
CodeTooComplex, UnificationTooComplex, UnknownPropButFoundLikeProp, GenericError, InternalError, CannotCallNonFunction, ExtraInformation,
DeprecatedApiUsed, ModuleHasCyclicDependency, IllegalRequire, FunctionExitsWithoutReturning, DuplicateGenericParameter,
CannotInferBinaryOperation, MissingProperties, SwappedGenericTypeParameter, OptionalValueAccess, MissingUnionProperty, TypesAreUnrelated,
NormalizationTooComplex, TypePackMismatch, DynamicPropertyLookupOnClassesUnsafe, UninhabitedTypeFamily, UninhabitedTypePackFamily>;
struct TypeErrorSummary
{


@ -2,6 +2,9 @@
#pragma once
#include "Luau/Type.h"
#include "Luau/DenseHash.h"
#include <vector>
namespace Luau
{
@ -10,6 +13,29 @@ struct TypeArena;
struct Scope;
void quantify(TypeId ty, TypeLevel level);
std::optional<TypeId> quantify(TypeArena* arena, TypeId ty, Scope* scope);
// TODO: This is eerily similar to the pattern that NormalizedClassType
// implements. We could, and perhaps should, merge them together.
template<typename K, typename V>
struct OrderedMap
{
std::vector<K> keys;
DenseHashMap<K, V> pairings{nullptr};
void push(K k, V v)
{
keys.push_back(k);
pairings[k] = v;
}
};
struct QuantifierResult
{
TypeId result;
OrderedMap<TypeId, TypeId> insertedGenerics;
OrderedMap<TypePackId, TypePackId> insertedGenericPacks;
};
std::optional<QuantifierResult> quantify(TypeArena* arena, TypeId ty, Scope* scope);
} // namespace Luau


@ -19,6 +19,10 @@ struct PendingType
// The pending Type state.
Type pending;
// On very rare occasions, we need to delete an entry from the TxnLog.
// DenseHashMap does not afford that so we note its deadness here.
bool dead = false;
explicit PendingType(Type state)
: pending(std::move(state))
{
@ -61,10 +65,11 @@ T* getMutable(PendingTypePack* pending)
// Log of what TypeIds we are rebinding, to be committed later.
struct TxnLog
{
TxnLog()
explicit TxnLog(bool useScopes = false)
: typeVarChanges(nullptr)
, typePackChanges(nullptr)
, ownedSeen()
, useScopes(useScopes)
, sharedSeen(&ownedSeen)
{
}
@ -297,6 +302,12 @@ private:
void popSeen(TypeOrPackId lhs, TypeOrPackId rhs);
public:
// There is one spot in the code where TxnLog has to reconcile collisions
// between parallel logs. In that codepath, we have to work out which of two
// FreeTypes subsumes the other. If useScopes is false, the TypeLevel is
// used. Else we use the embedded Scope*.
bool useScopes = false;
// Used to avoid infinite recursion when types are cyclic.
// Shared with all the descendent TxnLogs.
std::vector<std::pair<TypeOrPackId, TypeOrPackId>>* sharedSeen;


@ -23,6 +23,7 @@
LUAU_FASTINT(LuauTableTypeMaximumStringifierLength)
LUAU_FASTINT(LuauTypeMaximumStringifierLength)
LUAU_FASTFLAG(LuauTypecheckClassTypeIndexers)
namespace Luau
{
@ -31,6 +32,8 @@ struct TypeArena;
struct Scope;
using ScopePtr = std::shared_ptr<Scope>;
struct TypeFamily;
/**
* There are three kinds of type variables:
* - `Free` variables are metavariables, which stand for unconstrained types.
@ -489,6 +492,7 @@ struct ClassType
Tags tags;
std::shared_ptr<ClassUserData> userData;
ModuleName definitionModuleName;
std::optional<TableIndexer> indexer;
ClassType(Name name, Props props, std::optional<TypeId> parent, std::optional<TypeId> metatable, Tags tags,
std::shared_ptr<ClassUserData> userData, ModuleName definitionModuleName)
@ -501,6 +505,35 @@ struct ClassType
, definitionModuleName(definitionModuleName)
{
}
ClassType(Name name, Props props, std::optional<TypeId> parent, std::optional<TypeId> metatable, Tags tags,
std::shared_ptr<ClassUserData> userData, ModuleName definitionModuleName, std::optional<TableIndexer> indexer)
: name(name)
, props(props)
, parent(parent)
, metatable(metatable)
, tags(tags)
, userData(userData)
, definitionModuleName(definitionModuleName)
, indexer(indexer)
{
LUAU_ASSERT(FFlag::LuauTypecheckClassTypeIndexers);
}
};
/**
* An instance of a type family that has not yet been reduced to a more concrete
* type. The constraint solver receives a constraint to reduce each
* TypeFamilyInstanceType to a concrete type. A design detail is important to
* note here: the parameters for this instantiation of the type family are
* contained within this type, so that they can be substituted.
*/
struct TypeFamilyInstanceType
{
NotNull<TypeFamily> family;
std::vector<TypeId> typeArguments;
std::vector<TypePackId> packArguments;
};
struct TypeFun
@ -640,8 +673,9 @@ struct NegationType
using ErrorType = Unifiable::Error;
using TypeVariant = Unifiable::Variant<TypeId, FreeType, GenericType, PrimitiveType, BlockedType, PendingExpansionType, SingletonType, FunctionType,
TableType, MetatableType, ClassType, AnyType, UnionType, IntersectionType, LazyType, UnknownType, NeverType, NegationType>;
using TypeVariant =
Unifiable::Variant<TypeId, FreeType, GenericType, PrimitiveType, BlockedType, PendingExpansionType, SingletonType, FunctionType, TableType,
MetatableType, ClassType, AnyType, UnionType, IntersectionType, LazyType, UnknownType, NeverType, NegationType, TypeFamilyInstanceType>;
struct Type final
{


@ -0,0 +1,115 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Error.h"
#include "Luau/NotNull.h"
#include "Luau/Variant.h"
#include <functional>
#include <string>
#include <optional>
namespace Luau
{
struct Type;
using TypeId = const Type*;
struct TypePackVar;
using TypePackId = const TypePackVar*;
struct TypeArena;
struct BuiltinTypes;
struct TxnLog;
/// Represents a reduction result, which may have successfully reduced the type,
/// may have concretely failed to reduce the type, or may simply be stuck
/// without more information.
template<typename Ty>
struct TypeFamilyReductionResult
{
/// The result of the reduction, if any. If this is nullopt, the family
/// could not be reduced.
std::optional<Ty> result;
/// Whether the result is uninhabited: whether we know, unambiguously and
/// permanently, whether this type family reduction results in an
/// uninhabitable type. This will trigger an error to be reported.
bool uninhabited;
/// Any types that need to be progressed or mutated before the reduction may
/// proceed.
std::vector<TypeId> blockedTypes;
/// Any type packs that need to be progressed or mutated before the
/// reduction may proceed.
std::vector<TypePackId> blockedPacks;
};
/// Represents a type function that may be applied to map a series of types and
/// type packs to a single output type.
struct TypeFamily
{
/// The human-readable name of the type family. Used to stringify instance
/// types.
std::string name;
/// The reducer function for the type family.
std::function<TypeFamilyReductionResult<TypeId>(
std::vector<TypeId>, std::vector<TypePackId>, NotNull<TypeArena>, NotNull<BuiltinTypes>, NotNull<const TxnLog> log)>
reducer;
};
/// Represents a type function that may be applied to map a series of types and
/// type packs to a single output type pack.
struct TypePackFamily
{
/// The human-readable name of the type pack family. Used to stringify
/// instance packs.
std::string name;
/// The reducer function for the type pack family.
std::function<TypeFamilyReductionResult<TypePackId>(
std::vector<TypeId>, std::vector<TypePackId>, NotNull<TypeArena>, NotNull<BuiltinTypes>, NotNull<const TxnLog> log)>
reducer;
};
struct FamilyGraphReductionResult
{
ErrorVec errors;
DenseHashSet<TypeId> blockedTypes{nullptr};
DenseHashSet<TypePackId> blockedPacks{nullptr};
DenseHashSet<TypeId> reducedTypes{nullptr};
DenseHashSet<TypePackId> reducedPacks{nullptr};
};
/**
* Attempt to reduce all instances of any type or type pack family in the type
* graph provided.
*
* @param entrypoint the entry point to the type graph.
* @param location the location the reduction is occurring at; used to populate
* type errors.
* @param arena an arena to allocate types into.
* @param builtins the built-in types.
* @param log a TxnLog to use. If one is provided, substitution will take place
* against the TxnLog, otherwise substitutions will directly mutate the type
* graph. Do not provide the empty TxnLog, as a result.
*/
FamilyGraphReductionResult reduceFamilies(
TypeId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log = nullptr, bool force = false);
/**
* Attempt to reduce all instances of any type or type pack family in the type
* graph provided.
*
* @param entrypoint the entry point to the type graph.
* @param location the location the reduction is occurring at; used to populate
* type errors.
* @param arena an arena to allocate types into.
* @param builtins the built-in types.
* @param log a TxnLog to use. If one is provided, substitution will take place
* against the TxnLog, otherwise substitutions will directly mutate the type
* graph. Do not provide the empty TxnLog, as a result.
*/
FamilyGraphReductionResult reduceFamilies(
TypePackId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log = nullptr, bool force = false);
} // namespace Luau
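
A hedged usage sketch of the API above (the `instanceTy`, `arena`, and
`builtins` names are assumptions, and error/block handling is abbreviated):

```cpp
// Reduce every family instance reachable from `instanceTy`. No TxnLog is
// supplied, so substitutions mutate the type graph directly.
Luau::FamilyGraphReductionResult r = Luau::reduceFamilies(
    instanceTy, Luau::Location{}, Luau::NotNull{&arena}, builtins, /* log */ nullptr, /* force */ false);

// r.errors may contain UninhabitedTypeFamily / UninhabitedTypePackFamily errors.
// r.blockedTypes and r.blockedPacks list what must make progress before the
// reduction can be retried; the constraint solver blocks on these.
```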


@ -12,11 +12,13 @@ namespace Luau
{
struct TypeArena;
struct TypePackFamily;
struct TxnLog;
struct TypePack;
struct VariadicTypePack;
struct BlockedTypePack;
struct TypeFamilyInstanceTypePack;
struct TypePackVar;
using TypePackId = const TypePackVar*;
@ -50,10 +52,10 @@ struct GenericTypePack
};
using BoundTypePack = Unifiable::Bound<TypePackId>;
using ErrorTypePack = Unifiable::Error;
using TypePackVariant = Unifiable::Variant<TypePackId, FreeTypePack, GenericTypePack, TypePack, VariadicTypePack, BlockedTypePack>;
using TypePackVariant =
Unifiable::Variant<TypePackId, FreeTypePack, GenericTypePack, TypePack, VariadicTypePack, BlockedTypePack, TypeFamilyInstanceTypePack>;
/* A TypePack is a rope-like string of TypeIds. We use this structure to encode
* notions like packs of unknown length and packs of any length, as well as more
@ -83,6 +85,17 @@ struct BlockedTypePack
static size_t nextIndex;
};
/**
* Analogous to a TypeFamilyInstanceType.
*/
struct TypeFamilyInstanceTypePack
{
NotNull<TypePackFamily> family;
std::vector<TypeId> typeArguments;
std::vector<TypePackId> packArguments;
};
struct TypePackVar
{
explicit TypePackVar(const TypePackVariant& ty);


@ -64,9 +64,11 @@ struct Unifier
Variance variance = Covariant;
bool normalize = true; // Normalize unions and intersections if necessary
bool checkInhabited = true; // Normalize types to check if they are inhabited
bool useScopes = false; // If true, we use the scope hierarchy rather than TypeLevels
CountMismatch::Context ctx = CountMismatch::Arg;
// If true, generics act as free types when unifying.
bool hideousFixMeGenericsAreActuallyFree = false;
UnifierSharedState& sharedState;
// When the Unifier is forced to unify two blocked types (or packs), they
@ -78,6 +80,10 @@ struct Unifier
Unifier(
NotNull<Normalizer> normalizer, Mode mode, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog = nullptr);
// Configure the Unifier to test for scope subsumption via embedded Scope
// pointers rather than TypeLevels.
void enableScopeTests();
// Test whether the two type vars unify. Never commits the result.
ErrorVec canUnify(TypeId subTy, TypeId superTy);
ErrorVec canUnify(TypePackId subTy, TypePackId superTy, bool isFunctionCall = false);
@ -159,6 +165,9 @@ private:
// Available after regular type pack unification errors
std::optional<int> firstPackErrorPos;
// If true, we use the scope hierarchy rather than TypeLevels
bool useScopes = false;
};
void promoteTypeLevels(TxnLog& log, const TypeArena* arena, TypeLevel minLevel, Scope* outerScope, bool useScope, TypePackId tp);


@ -159,6 +159,10 @@ struct GenericTypeVisitor
{
return visit(ty);
}
virtual bool visit(TypeId ty, const TypeFamilyInstanceType& tfit)
{
return visit(ty);
}
virtual bool visit(TypePackId tp)
{
@ -192,6 +196,10 @@ struct GenericTypeVisitor
{
return visit(tp);
}
virtual bool visit(TypePackId tp, const TypeFamilyInstanceTypePack& tfitp)
{
return visit(tp);
}
void traverse(TypeId ty)
{
@ -272,6 +280,15 @@ struct GenericTypeVisitor
if (ctv->metatable)
traverse(*ctv->metatable);
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (ctv->indexer)
{
traverse(ctv->indexer->indexType);
traverse(ctv->indexer->indexResultType);
}
}
}
}
else if (auto atv = get<AnyType>(ty))
@ -327,6 +344,17 @@ struct GenericTypeVisitor
if (visit(ty, *ntv))
traverse(ntv->ty);
}
else if (auto tfit = get<TypeFamilyInstanceType>(ty))
{
if (visit(ty, *tfit))
{
for (TypeId p : tfit->typeArguments)
traverse(p);
for (TypePackId p : tfit->packArguments)
traverse(p);
}
}
else
LUAU_ASSERT(!"GenericTypeVisitor::traverse(TypeId) is not exhaustive!");
@ -376,6 +404,17 @@ struct GenericTypeVisitor
}
else if (auto btp = get<BlockedTypePack>(tp))
visit(tp, *btp);
else if (auto tfitp = get<TypeFamilyInstanceTypePack>(tp))
{
if (visit(tp, *tfitp))
{
for (TypeId t : tfitp->typeArguments)
traverse(t);
for (TypePackId t : tfitp->packArguments)
traverse(t);
}
}
else
LUAU_ASSERT(!"GenericTypeVisitor::traverse(TypePackId) is not exhaustive!");


@ -52,6 +52,12 @@ Property clone(const Property& prop, TypeArena& dest, CloneState& cloneState)
}
}
static TableIndexer clone(const TableIndexer& indexer, TypeArena& dest, CloneState& cloneState)
{
LUAU_ASSERT(FFlag::LuauTypecheckClassTypeIndexers);
return TableIndexer{clone(indexer.indexType, dest, cloneState), clone(indexer.indexResultType, dest, cloneState)};
}
struct TypePackCloner;
/*
@ -98,6 +104,7 @@ struct TypeCloner
void operator()(const UnknownType& t);
void operator()(const NeverType& t);
void operator()(const NegationType& t);
void operator()(const TypeFamilyInstanceType& t);
};
struct TypePackCloner
@ -171,6 +178,22 @@ struct TypePackCloner
if (t.tail)
destTp->tail = clone(*t.tail, dest, cloneState);
}
void operator()(const TypeFamilyInstanceTypePack& t)
{
TypePackId cloned = dest.addTypePack(TypeFamilyInstanceTypePack{t.family, {}, {}});
TypeFamilyInstanceTypePack* destTp = getMutable<TypeFamilyInstanceTypePack>(cloned);
LUAU_ASSERT(destTp);
seenTypePacks[typePackId] = cloned;
destTp->typeArguments.reserve(t.typeArguments.size());
for (TypeId ty : t.typeArguments)
destTp->typeArguments.push_back(clone(ty, dest, cloneState));
destTp->packArguments.reserve(t.packArguments.size());
for (TypePackId tp : t.packArguments)
destTp->packArguments.push_back(clone(tp, dest, cloneState));
}
};
template<typename T>
@ -288,8 +311,16 @@ void TypeCloner::operator()(const TableType& t)
for (const auto& [name, prop] : t.props)
ttv->props[name] = clone(prop, dest, cloneState);
if (t.indexer)
ttv->indexer = TableIndexer{clone(t.indexer->indexType, dest, cloneState), clone(t.indexer->indexResultType, dest, cloneState)};
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (t.indexer)
ttv->indexer = clone(*t.indexer, dest, cloneState);
}
else
{
if (t.indexer)
ttv->indexer = TableIndexer{clone(t.indexer->indexType, dest, cloneState), clone(t.indexer->indexResultType, dest, cloneState)};
}
for (TypeId& arg : ttv->instantiatedTypeParams)
arg = clone(arg, dest, cloneState);
@ -327,6 +358,12 @@ void TypeCloner::operator()(const ClassType& t)
if (t.metatable)
ctv->metatable = clone(*t.metatable, dest, cloneState);
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (t.indexer)
ctv->indexer = clone(*t.indexer, dest, cloneState);
}
}
void TypeCloner::operator()(const AnyType& t)
@ -389,6 +426,28 @@ void TypeCloner::operator()(const NegationType& t)
asMutable(result)->ty = NegationType{ty};
}
void TypeCloner::operator()(const TypeFamilyInstanceType& t)
{
TypeId result = dest.addType(TypeFamilyInstanceType{
t.family,
{},
{},
});
seenTypes[typeId] = result;
TypeFamilyInstanceType* tfit = getMutable<TypeFamilyInstanceType>(result);
LUAU_ASSERT(tfit != nullptr);
tfit->typeArguments.reserve(t.typeArguments.size());
for (TypeId p : t.typeArguments)
tfit->typeArguments.push_back(clone(p, dest, cloneState));
tfit->packArguments.reserve(t.packArguments.size());
for (TypePackId p : t.packArguments)
tfit->packArguments.push_back(clone(p, dest, cloneState));
}
} // anonymous namespace
TypePackId clone(TypePackId tp, TypeArena& dest, CloneState& cloneState)


@ -728,6 +728,7 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocalFun
});
addConstraint(scope, std::move(c));
module->astTypes[function->func] = functionType;
return ControlFlow::None;
}
@ -1475,7 +1476,7 @@ Inference ConstraintGraphBuilder::check(
Checkpoint endCheckpoint = checkpoint(this);
TypeId generalizedTy = arena->addType(BlockedType{});
NotNull<Constraint> gc = addConstraint(scope, expr->location, GeneralizationConstraint{generalizedTy, sig.signature});
NotNull<Constraint> gc = addConstraint(sig.signatureScope, expr->location, GeneralizationConstraint{generalizedTy, sig.signature});
forEachConstraint(startCheckpoint, endCheckpoint, this, [gc](const ConstraintPtr& constraint) {
gc->dependencies.emplace_back(constraint.get());


@ -16,6 +16,7 @@
#include "Luau/Type.h"
#include "Luau/Unifier.h"
#include "Luau/VisitType.h"
#include "Luau/TypeFamily.h"
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
@ -226,6 +227,32 @@ void dump(ConstraintSolver* cs, ToStringOptions& opts)
}
}
struct InstantiationQueuer : TypeOnceVisitor
{
ConstraintSolver* solver;
NotNull<Scope> scope;
Location location;
explicit InstantiationQueuer(NotNull<Scope> scope, const Location& location, ConstraintSolver* solver)
: solver(solver)
, scope(scope)
, location(location)
{
}
bool visit(TypeId ty, const PendingExpansionType& petv) override
{
solver->pushConstraint(scope, location, TypeAliasExpansionConstraint{ty});
return false;
}
bool visit(TypeId ty, const TypeFamilyInstanceType& tfit) override
{
solver->pushConstraint(scope, location, ReduceConstraint{ty});
return true;
}
};
ConstraintSolver::ConstraintSolver(NotNull<Normalizer> normalizer, NotNull<Scope> rootScope, std::vector<NotNull<Constraint>> constraints,
ModuleName moduleName, NotNull<ModuleResolver> moduleResolver, std::vector<RequireCycle> requireCycles, DcrLogger* logger)
: arena(normalizer->arena)
@ -441,6 +468,10 @@ bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool fo
success = tryDispatch(*sottc, constraint);
else if (auto uc = get<UnpackConstraint>(*constraint))
success = tryDispatch(*uc, constraint);
else if (auto rc = get<ReduceConstraint>(*constraint))
success = tryDispatch(*rc, constraint, force);
else if (auto rpc = get<ReducePackConstraint>(*constraint))
success = tryDispatch(*rpc, constraint, force);
else
LUAU_ASSERT(false);
@ -479,13 +510,19 @@ bool ConstraintSolver::tryDispatch(const GeneralizationConstraint& c, NotNull<co
else if (get<PendingExpansionType>(generalizedType))
return block(generalizedType, constraint);
std::optional<TypeId> generalized = quantify(arena, c.sourceType, constraint->scope);
std::optional<QuantifierResult> generalized = quantify(arena, c.sourceType, constraint->scope);
if (generalized)
{
if (get<BlockedType>(generalizedType))
asMutable(generalizedType)->ty.emplace<BoundType>(*generalized);
asMutable(generalizedType)->ty.emplace<BoundType>(generalized->result);
else
unify(generalizedType, *generalized, constraint->scope);
unify(generalizedType, generalized->result, constraint->scope);
for (auto [free, gen] : generalized->insertedGenerics.pairings)
unify(free, gen, constraint->scope);
for (auto [free, gen] : generalized->insertedGenericPacks.pairings)
unify(free, gen, constraint->scope);
}
else
{
@ -504,6 +541,9 @@ bool ConstraintSolver::tryDispatch(const InstantiationConstraint& c, NotNull<con
if (isBlocked(c.superType))
return block(c.superType, constraint);
if (!recursiveBlock(c.superType, constraint))
return false;
Instantiation inst(TxnLog::empty(), arena, TypeLevel{}, constraint->scope);
std::optional<TypeId> instantiated = inst.substitute(c.superType);
@ -512,6 +552,9 @@ bool ConstraintSolver::tryDispatch(const InstantiationConstraint& c, NotNull<con
LUAU_ASSERT(get<BlockedType>(c.subType));
asMutable(c.subType)->ty.emplace<BoundType>(*instantiated);
InstantiationQueuer queuer{constraint->scope, constraint->location, this};
queuer.traverse(c.subType);
unblock(c.subType);
return true;
@ -953,26 +996,6 @@ struct InfiniteTypeFinder : TypeOnceVisitor
}
};
struct InstantiationQueuer : TypeOnceVisitor
{
ConstraintSolver* solver;
NotNull<Scope> scope;
Location location;
explicit InstantiationQueuer(NotNull<Scope> scope, const Location& location, ConstraintSolver* solver)
: solver(solver)
, scope(scope)
, location(location)
{
}
bool visit(TypeId ty, const PendingExpansionType& petv) override
{
solver->pushConstraint(scope, location, TypeAliasExpansionConstraint{ty});
return false;
}
};
bool ConstraintSolver::tryDispatch(const TypeAliasExpansionConstraint& c, NotNull<const Constraint> constraint)
{
const PendingExpansionType* petv = get<PendingExpansionType>(follow(c.target));
@ -1246,7 +1269,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
LUAU_ASSERT(instantiated); // TODO FIXME HANDLE THIS
Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
u.useScopes = true;
u.enableScopeTests();
u.tryUnify(*instantiated, inferredTy, /* isFunctionCall */ true);
@ -1278,8 +1301,12 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
u.log.commit();
unblock(changedTypes);
unblock(changedPacks);
unblock(c.result);
InstantiationQueuer queuer{constraint->scope, constraint->location, this};
queuer.traverse(fn);
queuer.traverse(inferredTy);
return true;
}
}
@ -1295,7 +1322,7 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
// We found no matching overloads.
Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
u.useScopes = true;
u.enableScopeTests();
u.tryUnify(inferredTy, builtinTypes->anyType);
u.tryUnify(fn, builtinTypes->anyType);
@ -1305,8 +1332,12 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
unblock(changedTypes);
unblock(changedPacks);
unblock(c.result);
InstantiationQueuer queuer{constraint->scope, constraint->location, this};
queuer.traverse(fn);
queuer.traverse(inferredTy);
return true;
}
@ -1567,8 +1598,11 @@ bool ConstraintSolver::tryDispatch(const SetIndexerConstraint& c, NotNull<const
}
else if (tt->state == TableState::Free || tt->state == TableState::Unsealed)
{
TypeId promotedIndexTy = arena->freshType(tt->scope);
unify(c.indexType, promotedIndexTy, constraint->scope);
auto mtt = getMutable<TableType>(subjectType);
mtt->indexer = TableIndexer{c.indexType, c.propType};
mtt->indexer = TableIndexer{promotedIndexTy, c.propType};
asMutable(c.propType)->ty.emplace<FreeType>(tt->scope);
asMutable(c.resultType)->ty.emplace<BoundType>(subjectType);
unblock(c.propType);
@ -1666,6 +1700,52 @@ bool ConstraintSolver::tryDispatch(const UnpackConstraint& c, NotNull<const Cons
return true;
}
bool ConstraintSolver::tryDispatch(const ReduceConstraint& c, NotNull<const Constraint> constraint, bool force)
{
TypeId ty = follow(c.ty);
FamilyGraphReductionResult result = reduceFamilies(ty, constraint->location, NotNull{arena}, builtinTypes, nullptr, force);
for (TypeId r : result.reducedTypes)
unblock(r);
for (TypePackId r : result.reducedPacks)
unblock(r);
if (force)
return true;
for (TypeId b : result.blockedTypes)
block(b, constraint);
for (TypePackId b : result.blockedPacks)
block(b, constraint);
return result.blockedTypes.empty() && result.blockedPacks.empty();
}
bool ConstraintSolver::tryDispatch(const ReducePackConstraint& c, NotNull<const Constraint> constraint, bool force)
{
TypePackId tp = follow(c.tp);
FamilyGraphReductionResult result = reduceFamilies(tp, constraint->location, NotNull{arena}, builtinTypes, nullptr, force);
for (TypeId r : result.reducedTypes)
unblock(r);
for (TypePackId r : result.reducedPacks)
unblock(r);
if (force)
return true;
for (TypeId b : result.blockedTypes)
block(b, constraint);
for (TypePackId b : result.blockedPacks)
block(b, constraint);
return result.blockedTypes.empty() && result.blockedPacks.empty();
}
bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const IterableConstraint& c, NotNull<const Constraint> constraint, bool force)
{
auto block_ = [&](auto&& t) {
@ -2031,7 +2111,7 @@ template <typename TID>
bool ConstraintSolver::tryUnify(NotNull<const Constraint> constraint, TID subTy, TID superTy)
{
Unifier u{normalizer, Mode::Strict, constraint->scope, constraint->location, Covariant};
u.useScopes = true;
u.enableScopeTests();
u.tryUnify(subTy, superTy);
@ -2195,10 +2275,11 @@ void ConstraintSolver::unblock(NotNull<const Constraint> progressed)
return unblock_(progressed.get());
}
void ConstraintSolver::unblock(TypeId progressed)
void ConstraintSolver::unblock(TypeId ty)
{
DenseHashSet<TypeId> seen{nullptr};
TypeId progressed = ty;
while (true)
{
if (seen.find(progressed))
@ -2256,7 +2337,7 @@ bool ConstraintSolver::isBlocked(NotNull<const Constraint> constraint)
void ConstraintSolver::unify(TypeId subType, TypeId superType, NotNull<Scope> scope)
{
Unifier u{normalizer, Mode::Strict, scope, Location{}, Covariant};
u.useScopes = true;
u.enableScopeTests();
u.tryUnify(subType, superType);
@ -2279,7 +2360,7 @@ void ConstraintSolver::unify(TypePackId subPack, TypePackId superPack, NotNull<S
{
UnifierSharedState sharedState{&iceReporter};
Unifier u{normalizer, Mode::Strict, scope, Location{}, Covariant};
u.useScopes = true;
u.enableScopeTests();
u.tryUnify(subPack, superPack);
@ -2374,7 +2455,7 @@ TypeId ConstraintSolver::unionOfTypes(TypeId a, TypeId b, NotNull<Scope> scope,
if (unifyFreeTypes && (get<FreeType>(a) || get<FreeType>(b)))
{
Unifier u{normalizer, Mode::Strict, scope, Location{}, Covariant};
u.useScopes = true;
u.enableScopeTests();
u.tryUnify(b, a);
if (u.errors.empty())


@ -484,6 +484,16 @@ struct ErrorConverter
{
return "Attempting a dynamic property access on type '" + Luau::toString(e.ty) + "' is unsafe and may cause exceptions at runtime";
}
std::string operator()(const UninhabitedTypeFamily& e) const
{
return "Type family instance " + Luau::toString(e.ty) + " is uninhabited";
}
std::string operator()(const UninhabitedTypePackFamily& e) const
{
return "Type pack family instance " + Luau::toString(e.tp) + " is uninhabited";
}
};
struct InvalidNameChecker
@ -786,6 +796,16 @@ bool DynamicPropertyLookupOnClassesUnsafe::operator==(const DynamicPropertyLooku
return ty == rhs.ty;
}
bool UninhabitedTypeFamily::operator==(const UninhabitedTypeFamily& rhs) const
{
return ty == rhs.ty;
}
bool UninhabitedTypePackFamily::operator==(const UninhabitedTypePackFamily& rhs) const
{
return tp == rhs.tp;
}
std::string toString(const TypeError& error)
{
return toString(error, TypeErrorToStringOptions{});
@ -944,6 +964,10 @@ void copyError(T& e, TypeArena& destArena, CloneState cloneState)
}
else if constexpr (std::is_same_v<T, DynamicPropertyLookupOnClassesUnsafe>)
e.ty = clone(e.ty);
else if constexpr (std::is_same_v<T, UninhabitedTypeFamily>)
e.ty = clone(e.ty);
else if constexpr (std::is_same_v<T, UninhabitedTypePackFamily>)
e.tp = clone(e.tp);
else
static_assert(always_false_v<T>, "Non-exhaustive type switch");
}


@ -192,6 +192,10 @@ static void errorToString(std::ostream& stream, const T& err)
stream << "TypePackMismatch { wanted = '" + toString(err.wantedTp) + "', given = '" + toString(err.givenTp) + "' }";
else if constexpr (std::is_same_v<T, DynamicPropertyLookupOnClassesUnsafe>)
stream << "DynamicPropertyLookupOnClassesUnsafe { " << toString(err.ty) << " }";
else if constexpr (std::is_same_v<T, UninhabitedTypeFamily>)
stream << "UninhabitedTypeFamily { " << toString(err.ty) << " }";
else if constexpr (std::is_same_v<T, UninhabitedTypePackFamily>)
stream << "UninhabitedTypePackFamily { " << toString(err.tp) << " }";
else
static_assert(always_false_v<T>, "Non-exhaustive type switch");
}


@ -154,8 +154,8 @@ void quantify(TypeId ty, TypeLevel level)
struct PureQuantifier : Substitution
{
Scope* scope;
std::vector<TypeId> insertedGenerics;
std::vector<TypePackId> insertedGenericPacks;
OrderedMap<TypeId, TypeId> insertedGenerics;
OrderedMap<TypePackId, TypePackId> insertedGenericPacks;
bool seenMutableType = false;
bool seenGenericType = false;
@ -203,7 +203,7 @@ struct PureQuantifier : Substitution
if (auto ftv = get<FreeType>(ty))
{
TypeId result = arena->addType(GenericType{scope});
insertedGenerics.push_back(result);
insertedGenerics.push(ty, result);
return result;
}
else if (auto ttv = get<TableType>(ty))
@ -217,7 +217,10 @@ struct PureQuantifier : Substitution
resultTable->scope = scope;
if (ttv->state == TableState::Free)
{
resultTable->state = TableState::Generic;
insertedGenerics.push(ty, result);
}
else if (ttv->state == TableState::Unsealed)
resultTable->state = TableState::Sealed;
@ -231,8 +234,8 @@ struct PureQuantifier : Substitution
{
if (auto ftp = get<FreeTypePack>(tp))
{
TypePackId result = arena->addTypePack(TypePackVar{GenericTypePack{}});
insertedGenericPacks.push_back(result);
TypePackId result = arena->addTypePack(TypePackVar{GenericTypePack{scope}});
insertedGenericPacks.push(tp, result);
return result;
}
@ -252,7 +255,7 @@ struct PureQuantifier : Substitution
}
};
std::optional<TypeId> quantify(TypeArena* arena, TypeId ty, Scope* scope)
std::optional<QuantifierResult> quantify(TypeArena* arena, TypeId ty, Scope* scope)
{
PureQuantifier quantifier{arena, scope};
std::optional<TypeId> result = quantifier.substitute(ty);
@ -262,11 +265,20 @@ std::optional<TypeId> quantify(TypeArena* arena, TypeId ty, Scope* scope)
FunctionType* ftv = getMutable<FunctionType>(*result);
LUAU_ASSERT(ftv);
ftv->scope = scope;
ftv->generics.insert(ftv->generics.end(), quantifier.insertedGenerics.begin(), quantifier.insertedGenerics.end());
ftv->genericPacks.insert(ftv->genericPacks.end(), quantifier.insertedGenericPacks.begin(), quantifier.insertedGenericPacks.end());
for (auto k : quantifier.insertedGenerics.keys)
{
TypeId g = quantifier.insertedGenerics.pairings[k];
if (get<GenericType>(g))
ftv->generics.push_back(g);
}
for (auto k : quantifier.insertedGenericPacks.keys)
ftv->genericPacks.push_back(quantifier.insertedGenericPacks.pairings[k]);
ftv->hasNoGenerics = ftv->generics.empty() && ftv->genericPacks.empty() && !quantifier.seenGenericType && !quantifier.seenMutableType;
return *result;
return std::optional<QuantifierResult>({*result, std::move(quantifier.insertedGenerics), std::move(quantifier.insertedGenericPacks)});
}
} // namespace Luau


@ -78,6 +78,11 @@ static TypeId DEPRECATED_shallowClone(TypeId ty, TypeArena& dest, const TxnLog*
{
result = dest.addType(NegationType{ntv->ty});
}
else if (const TypeFamilyInstanceType* tfit = get<TypeFamilyInstanceType>(ty))
{
TypeFamilyInstanceType clone{tfit->family, tfit->typeArguments, tfit->packArguments};
result = dest.addType(std::move(clone));
}
else
return result;
@ -168,14 +173,27 @@ static TypeId shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool a
{
if (alwaysClone)
{
ClassType clone{a.name, a.props, a.parent, a.metatable, a.tags, a.userData, a.definitionModuleName};
return dest.addType(std::move(clone));
if (FFlag::LuauTypecheckClassTypeIndexers)
{
ClassType clone{a.name, a.props, a.parent, a.metatable, a.tags, a.userData, a.definitionModuleName, a.indexer};
return dest.addType(std::move(clone));
}
else
{
ClassType clone{a.name, a.props, a.parent, a.metatable, a.tags, a.userData, a.definitionModuleName};
return dest.addType(std::move(clone));
}
}
else
return ty;
}
else if constexpr (std::is_same_v<T, NegationType>)
return dest.addType(NegationType{a.ty});
else if constexpr (std::is_same_v<T, TypeFamilyInstanceType>)
{
TypeFamilyInstanceType clone{a.family, a.typeArguments, a.packArguments};
return dest.addType(std::move(clone));
}
else
static_assert(always_false_v<T>, "Non-exhaustive shallowClone switch");
};
@ -255,6 +273,14 @@ void Tarjan::visitChildren(TypeId ty, int index)
for (TypePackId a : petv->packArguments)
visitChild(a);
}
else if (const TypeFamilyInstanceType* tfit = get<TypeFamilyInstanceType>(ty))
{
for (TypeId a : tfit->typeArguments)
visitChild(a);
for (TypePackId a : tfit->packArguments)
visitChild(a);
}
else if (const ClassType* ctv = get<ClassType>(ty); FFlag::LuauClassTypeVarsInSubstitution && ctv)
{
for (const auto& [name, prop] : ctv->props)
@ -265,6 +291,15 @@ void Tarjan::visitChildren(TypeId ty, int index)
if (ctv->metatable)
visitChild(*ctv->metatable);
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (ctv->indexer)
{
visitChild(ctv->indexer->indexType);
visitChild(ctv->indexer->indexResultType);
}
}
}
else if (const NegationType* ntv = get<NegationType>(ty))
{
@ -669,6 +704,14 @@ TypePackId Substitution::clone(TypePackId tp)
clone.hidden = vtp->hidden;
return addTypePack(std::move(clone));
}
else if (const TypeFamilyInstanceTypePack* tfitp = get<TypeFamilyInstanceTypePack>(tp))
{
TypeFamilyInstanceTypePack clone{
tfitp->family, std::vector<TypeId>(tfitp->typeArguments.size()), std::vector<TypePackId>(tfitp->packArguments.size())};
clone.typeArguments.assign(tfitp->typeArguments.begin(), tfitp->typeArguments.end());
clone.packArguments.assign(tfitp->packArguments.begin(), tfitp->packArguments.end());
return addTypePack(std::move(clone));
}
else if (FFlag::LuauClonePublicInterfaceLess2)
{
return addTypePack(*tp);
@ -786,6 +829,14 @@ void Substitution::replaceChildren(TypeId ty)
for (TypePackId& a : petv->packArguments)
a = replace(a);
}
else if (TypeFamilyInstanceType* tfit = getMutable<TypeFamilyInstanceType>(ty))
{
for (TypeId& a : tfit->typeArguments)
a = replace(a);
for (TypePackId& a : tfit->packArguments)
a = replace(a);
}
else if (ClassType* ctv = getMutable<ClassType>(ty); FFlag::LuauClassTypeVarsInSubstitution && ctv)
{
for (auto& [name, prop] : ctv->props)
@ -796,6 +847,15 @@ void Substitution::replaceChildren(TypeId ty)
if (ctv->metatable)
ctv->metatable = replace(*ctv->metatable);
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (ctv->indexer)
{
ctv->indexer->indexType = replace(ctv->indexer->indexType);
ctv->indexer->indexResultType = replace(ctv->indexer->indexResultType);
}
}
}
else if (NegationType* ntv = getMutable<NegationType>(ty))
{
@ -824,6 +884,14 @@ void Substitution::replaceChildren(TypePackId tp)
{
vtp->ty = replace(vtp->ty);
}
else if (TypeFamilyInstanceTypePack* tfitp = getMutable<TypeFamilyInstanceTypePack>(tp))
{
for (TypeId& t : tfitp->typeArguments)
t = replace(t);
for (TypePackId& t : tfitp->packArguments)
t = replace(t);
}
}
} // namespace Luau


@ -257,6 +257,15 @@ void StateDot::visitChildren(TypeId ty, int index)
if (ctv->metatable)
visitChild(*ctv->metatable, index, "[metatable]");
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (ctv->indexer)
{
visitChild(ctv->indexer->indexType, index, "[index]");
visitChild(ctv->indexer->indexResultType, index, "[value]");
}
}
}
else if (const SingletonType* stv = get<SingletonType>(ty))
{


@ -8,6 +8,7 @@
#include "Luau/TypeInfer.h"
#include "Luau/TypePack.h"
#include "Luau/Type.h"
#include "Luau/TypeFamily.h"
#include "Luau/VisitType.h"
#include <algorithm>
@ -16,11 +17,22 @@
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
/*
* Prefix generic typenames with gen-
* Additionally, free types will be prefixed with free- and suffixed with their level. eg free-a-4
* Fair warning: Setting this will break a lot of Luau unit tests.
* Enables increasing levels of verbosity for Luau type names when stringifying.
* After level 2, test cases will break unpredictably because a pointer to their
* scope will be included in the stringification of generic and free types.
*
* Supported values:
*
* 0: Disabled, no changes.
*
* 1: Prefix free/generic types with free- and gen-, respectively. Also reveal
* hidden variadic tails.
*
* 2: Suffix free/generic types with their scope depth.
*
* 3: Suffix free/generic types with their scope pointer, if present.
*/
LUAU_FASTFLAGVARIABLE(DebugLuauVerboseTypeNames, false)
LUAU_FASTINTVARIABLE(DebugLuauVerboseTypeNames, 0)
LUAU_FASTFLAGVARIABLE(DebugLuauToStringNoLexicalSort, false)
namespace Luau
@ -223,11 +235,15 @@ struct StringifierState
++count;
emit(count);
emit("-");
char buffer[16];
uint32_t s = uint32_t(intptr_t(scope) & 0xFFFFFF);
snprintf(buffer, sizeof(buffer), "0x%x", s);
emit(buffer);
if (FInt::DebugLuauVerboseTypeNames >= 3)
{
emit("-");
char buffer[16];
uint32_t s = uint32_t(intptr_t(scope) & 0xFFFFFF);
snprintf(buffer, sizeof(buffer), "0x%x", s);
emit(buffer);
}
}
void emit(TypeLevel level)
@ -371,11 +387,13 @@ struct TypeStringifier
void operator()(TypeId ty, const FreeType& ftv)
{
state.result.invalid = true;
if (FFlag::DebugLuauVerboseTypeNames)
if (FInt::DebugLuauVerboseTypeNames >= 1)
state.emit("free-");
state.emit(state.getName(ty));
if (FFlag::DebugLuauVerboseTypeNames)
if (FInt::DebugLuauVerboseTypeNames >= 2)
{
state.emit("-");
if (FFlag::DebugLuauDeferredConstraintResolution)
@ -392,6 +410,9 @@ struct TypeStringifier
void operator()(TypeId ty, const GenericType& gtv)
{
if (FInt::DebugLuauVerboseTypeNames >= 1)
state.emit("gen-");
if (gtv.explicitName)
{
state.usedNames.insert(gtv.name);
@ -401,7 +422,7 @@ struct TypeStringifier
else
state.emit(state.getName(ty));
if (FFlag::DebugLuauVerboseTypeNames)
if (FInt::DebugLuauVerboseTypeNames >= 2)
{
state.emit("-");
if (FFlag::DebugLuauDeferredConstraintResolution)
@ -871,6 +892,33 @@ struct TypeStringifier
if (parens)
state.emit(")");
}
void operator()(TypeId, const TypeFamilyInstanceType& tfitv)
{
state.emit(tfitv.family->name);
state.emit("<");
bool comma = false;
for (TypeId ty : tfitv.typeArguments)
{
if (comma)
state.emit(", ");
comma = true;
stringify(ty);
}
for (TypePackId tp : tfitv.packArguments)
{
if (comma)
state.emit(", ");
comma = true;
stringify(tp);
}
state.emit(">");
}
};
struct TypePackStringifier
@ -958,7 +1006,7 @@ struct TypePackStringifier
if (tp.tail && !isEmpty(*tp.tail))
{
TypePackId tail = follow(*tp.tail);
if (auto vtp = get<VariadicTypePack>(tail); !vtp || (!FFlag::DebugLuauVerboseTypeNames && !vtp->hidden))
if (auto vtp = get<VariadicTypePack>(tail); !vtp || (FInt::DebugLuauVerboseTypeNames < 1 && !vtp->hidden))
{
if (first)
first = false;
@ -981,7 +1029,7 @@ struct TypePackStringifier
void operator()(TypePackId, const VariadicTypePack& pack)
{
state.emit("...");
if (FFlag::DebugLuauVerboseTypeNames && pack.hidden)
if (FInt::DebugLuauVerboseTypeNames >= 1 && pack.hidden)
{
state.emit("*hidden*");
}
@ -990,6 +1038,9 @@ struct TypePackStringifier
void operator()(TypePackId tp, const GenericTypePack& pack)
{
if (FInt::DebugLuauVerboseTypeNames >= 1)
state.emit("gen-");
if (pack.explicitName)
{
state.usedNames.insert(pack.name);
@ -1001,7 +1052,7 @@ struct TypePackStringifier
state.emit(state.getName(tp));
}
if (FFlag::DebugLuauVerboseTypeNames)
if (FInt::DebugLuauVerboseTypeNames >= 2)
{
state.emit("-");
if (FFlag::DebugLuauDeferredConstraintResolution)
@ -1009,17 +1060,18 @@ struct TypePackStringifier
else
state.emit(pack.level);
}
state.emit("...");
}
void operator()(TypePackId tp, const FreeTypePack& pack)
{
state.result.invalid = true;
if (FFlag::DebugLuauVerboseTypeNames)
if (FInt::DebugLuauVerboseTypeNames >= 1)
state.emit("free-");
state.emit(state.getName(tp));
if (FFlag::DebugLuauVerboseTypeNames)
if (FInt::DebugLuauVerboseTypeNames >= 2)
{
state.emit("-");
if (FFlag::DebugLuauDeferredConstraintResolution)
@ -1042,6 +1094,33 @@ struct TypePackStringifier
state.emit(btp.index);
state.emit("*");
}
void operator()(TypePackId, const TypeFamilyInstanceTypePack& tfitp)
{
state.emit(tfitp.family->name);
state.emit("<");
bool comma = false;
for (TypeId p : tfitp.typeArguments)
{
if (comma)
state.emit(", ");
comma = true;
stringify(p);
}
for (TypePackId p : tfitp.packArguments)
{
if (comma)
state.emit(", ");
comma = true;
stringify(p);
}
state.emit(">");
}
};
void TypeStringifier::stringify(TypePackId tp)
@ -1560,6 +1639,12 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
}
else if constexpr (std::is_same_v<T, UnpackConstraint>)
return tos(c.resultPack) + " ~ unpack " + tos(c.sourcePack);
else if constexpr (std::is_same_v<T, ReduceConstraint>)
return "reduce " + tos(c.ty);
else if constexpr (std::is_same_v<T, ReducePackConstraint>)
{
return "reduce " + tos(c.tp);
}
else
static_assert(always_false_v<T>, "Non-exhaustive constraint switch");
};


@ -1,6 +1,7 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/TxnLog.h"
#include "Luau/Scope.h"
#include "Luau/ToString.h"
#include "Luau/TypeArena.h"
#include "Luau/TypePack.h"
@ -8,6 +9,8 @@
#include <algorithm>
#include <stdexcept>
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
namespace Luau
{
@ -71,7 +74,11 @@ const TxnLog* TxnLog::empty()
void TxnLog::concat(TxnLog rhs)
{
for (auto& [ty, rep] : rhs.typeVarChanges)
{
if (rep->dead)
continue;
typeVarChanges[ty] = std::move(rep);
}
for (auto& [tp, rep] : rhs.typePackChanges)
typePackChanges[tp] = std::move(rep);
@ -81,7 +88,10 @@ void TxnLog::concatAsIntersections(TxnLog rhs, NotNull<TypeArena> arena)
{
for (auto& [ty, rightRep] : rhs.typeVarChanges)
{
if (auto leftRep = typeVarChanges.find(ty))
if (rightRep->dead)
continue;
if (auto leftRep = typeVarChanges.find(ty); leftRep && !(*leftRep)->dead)
{
TypeId leftTy = arena->addType((*leftRep)->pending);
TypeId rightTy = arena->addType(rightRep->pending);
@ -97,16 +107,94 @@ void TxnLog::concatAsIntersections(TxnLog rhs, NotNull<TypeArena> arena)
void TxnLog::concatAsUnion(TxnLog rhs, NotNull<TypeArena> arena)
{
for (auto& [ty, rightRep] : rhs.typeVarChanges)
if (FFlag::DebugLuauDeferredConstraintResolution)
{
if (auto leftRep = typeVarChanges.find(ty))
/*
* Check for cycles.
*
* We must not combine a log entry that binds 'a to 'b with a log that
* binds 'b to 'a.
*
* Of the two, identify the one with the 'bigger' scope and eliminate the
* entry that rebinds it.
*/
for (const auto& [rightTy, rightRep] : rhs.typeVarChanges)
{
TypeId leftTy = arena->addType((*leftRep)->pending);
TypeId rightTy = arena->addType(rightRep->pending);
typeVarChanges[ty]->pending.ty = UnionType{{leftTy, rightTy}};
if (rightRep->dead)
continue;
// We explicitly use get_if here because we do not wish to do anything
// if the uncommitted type is already bound to something else.
const FreeType* rf = get_if<FreeType>(&rightTy->ty);
if (!rf)
continue;
const BoundType* rb = Luau::get<BoundType>(&rightRep->pending);
if (!rb)
continue;
const TypeId leftTy = rb->boundTo;
const FreeType* lf = get_if<FreeType>(&leftTy->ty);
if (!lf)
continue;
auto leftRep = typeVarChanges.find(leftTy);
if (!leftRep)
continue;
if ((*leftRep)->dead)
continue;
const BoundType* lb = Luau::get<BoundType>(&(*leftRep)->pending);
if (!lb)
continue;
if (lb->boundTo == rightTy)
{
// leftTy has been bound to rightTy, but rightTy has also been bound
// to leftTy. We find the one that belongs to the more deeply nested
// scope and remove it from the log.
const bool discardLeft = useScopes ? subsumes(lf->scope, rf->scope) : lf->level.subsumes(rf->level);
if (discardLeft)
(*leftRep)->dead = true;
else
rightRep->dead = true;
}
}
for (auto& [ty, rightRep] : rhs.typeVarChanges)
{
if (rightRep->dead)
continue;
if (auto leftRep = typeVarChanges.find(ty); leftRep && !(*leftRep)->dead)
{
TypeId leftTy = arena->addType((*leftRep)->pending);
TypeId rightTy = arena->addType(rightRep->pending);
if (follow(leftTy) == follow(rightTy))
typeVarChanges[ty] = std::move(rightRep);
else
typeVarChanges[ty]->pending.ty = UnionType{{leftTy, rightTy}};
}
else
typeVarChanges[ty] = std::move(rightRep);
}
}
else
{
for (auto& [ty, rightRep] : rhs.typeVarChanges)
{
if (auto leftRep = typeVarChanges.find(ty))
{
TypeId leftTy = arena->addType((*leftRep)->pending);
TypeId rightTy = arena->addType(rightRep->pending);
typeVarChanges[ty]->pending.ty = UnionType{{leftTy, rightTy}};
}
else
typeVarChanges[ty] = std::move(rightRep);
}
else
typeVarChanges[ty] = std::move(rightRep);
}
for (auto& [tp, rep] : rhs.typePackChanges)
@ -116,7 +204,10 @@ void TxnLog::concatAsUnion(TxnLog rhs, NotNull<TypeArena> arena)
void TxnLog::commit()
{
for (auto& [ty, rep] : typeVarChanges)
asMutable(ty)->reassign(rep.get()->pending);
{
if (!rep->dead)
asMutable(ty)->reassign(rep.get()->pending);
}
for (auto& [tp, rep] : typePackChanges)
asMutable(tp)->reassign(rep.get()->pending);
@ -135,7 +226,10 @@ TxnLog TxnLog::inverse()
TxnLog inversed(sharedSeen);
for (auto& [ty, _rep] : typeVarChanges)
inversed.typeVarChanges[ty] = std::make_unique<PendingType>(*ty);
{
if (!_rep->dead)
inversed.typeVarChanges[ty] = std::make_unique<PendingType>(*ty);
}
for (auto& [tp, _rep] : typePackChanges)
inversed.typePackChanges[tp] = std::make_unique<PendingTypePack>(*tp);
@ -204,7 +298,7 @@ PendingType* TxnLog::queue(TypeId ty)
// Explicitly don't look in ancestors. If we have discovered something new
// about this type, we don't want to mutate the parent's state.
auto& pending = typeVarChanges[ty];
if (!pending)
if (!pending || (*pending).dead)
{
pending = std::make_unique<PendingType>(*ty);
pending->pending.owningArena = nullptr;
@ -237,7 +331,7 @@ PendingType* TxnLog::pending(TypeId ty) const
for (const TxnLog* current = this; current; current = current->parent)
{
if (auto it = current->typeVarChanges.find(ty))
if (auto it = current->typeVarChanges.find(ty); it && !(*it)->dead)
return it->get();
}


@ -9,6 +9,7 @@
#include "Luau/TypeInfer.h"
#include "Luau/TypePack.h"
#include "Luau/Type.h"
#include "Luau/TypeFamily.h"
#include <string>
@ -362,6 +363,10 @@ public:
// FIXME: do the same thing we do with ErrorType
throw InternalCompilerError("Cannot convert NegationType into AstNode");
}
AstType* operator()(const TypeFamilyInstanceType& tfit)
{
return allocator->alloc<AstTypeReference>(Location(), std::nullopt, AstName{tfit.family->name.c_str()}, std::nullopt, Location());
}
private:
Allocator* allocator;
@ -432,6 +437,11 @@ public:
return allocator->alloc<AstTypePackGeneric>(Location(), AstName("Unifiable<Error>"));
}
AstTypePack* operator()(const TypeFamilyInstanceTypePack& tfitp) const
{
return allocator->alloc<AstTypePackGeneric>(Location(), AstName(tfitp.family->name.c_str()));
}
private:
Allocator* allocator;
SyntheticNames* syntheticNames;


@ -16,6 +16,7 @@
#include "Luau/TypeReduction.h"
#include "Luau/TypeUtils.h"
#include "Luau/Unifier.h"
#include "Luau/TypeFamily.h"
#include <algorithm>
@ -113,6 +114,13 @@ struct TypeChecker2
return std::nullopt;
}
TypeId checkForFamilyInhabitance(TypeId instance, Location location)
{
TxnLog fake{};
reportErrors(reduceFamilies(instance, location, NotNull{&testArena}, builtinTypes, &fake, true).errors);
return instance;
}
TypePackId lookupPack(AstExpr* expr)
{
// If a type isn't in the type graph, it probably means that a recursion limit was exceeded.
@ -132,11 +140,11 @@ struct TypeChecker2
// allows us not to think about this very much in the actual typechecking logic.
TypeId* ty = module->astTypes.find(expr);
if (ty)
return follow(*ty);
return checkForFamilyInhabitance(follow(*ty), expr->location);
TypePackId* tp = module->astTypePacks.find(expr);
if (tp)
return flattenPack(*tp);
return checkForFamilyInhabitance(flattenPack(*tp), expr->location);
return builtinTypes->anyType;
}
@ -159,7 +167,7 @@ struct TypeChecker2
TypeId* ty = module->astResolvedTypes.find(annotation);
LUAU_ASSERT(ty);
return follow(*ty);
return checkForFamilyInhabitance(follow(*ty), annotation->location);
}
TypePackId lookupPackAnnotation(AstTypePack* annotation)
@ -311,6 +319,7 @@ struct TypeChecker2
TypePackId actualRetType = reconstructPack(ret->list, *arena);
Unifier u{NotNull{&normalizer}, Mode::Strict, stack.back(), ret->location, Covariant};
u.hideousFixMeGenericsAreActuallyFree = true;
u.tryUnify(actualRetType, expectedRetType);
const bool ok = u.errors.empty() && u.log.empty();
@ -989,8 +998,11 @@ struct TypeChecker2
return;
}
TxnLog fake{};
LUAU_ASSERT(ftv);
reportErrors(tryUnify(stack.back(), call->location, ftv->retTypes, expectedRetType, CountMismatch::Context::Return));
reportErrors(tryUnify(stack.back(), call->location, ftv->retTypes, expectedRetType, CountMismatch::Context::Return, /* genericsOkay */ true));
reportErrors(reduceFamilies(ftv->retTypes, call->location, NotNull{&testArena}, builtinTypes, &fake, true).errors);
auto it = begin(expectedArgTypes);
size_t i = 0;
@ -1007,7 +1019,8 @@ struct TypeChecker2
Location argLoc = argLocs.at(i >= argLocs.size() ? argLocs.size() - 1 : i);
reportErrors(tryUnify(stack.back(), argLoc, expectedArg, arg));
reportErrors(tryUnify(stack.back(), argLoc, expectedArg, arg, CountMismatch::Context::Arg, /* genericsOkay */ true));
reportErrors(reduceFamilies(arg, argLoc, NotNull{&testArena}, builtinTypes, &fake, true).errors);
++it;
++i;
@ -1018,7 +1031,8 @@ struct TypeChecker2
if (auto tail = it.tail())
{
TypePackId remainingArgs = testArena.addTypePack(TypePack{std::move(slice), std::nullopt});
reportErrors(tryUnify(stack.back(), argLocs.back(), *tail, remainingArgs));
reportErrors(tryUnify(stack.back(), argLocs.back(), *tail, remainingArgs, CountMismatch::Context::Arg, /* genericsOkay */ true));
reportErrors(reduceFamilies(remainingArgs, argLocs.back(), NotNull{&testArena}, builtinTypes, &fake, true).errors);
}
}
@ -1344,7 +1358,7 @@ struct TypeChecker2
else if (get<AnyType>(rightType) || get<ErrorType>(rightType))
return rightType;
if ((get<BlockedType>(leftType) || get<FreeType>(leftType)) && !isEquality && !isLogical)
if ((get<BlockedType>(leftType) || get<FreeType>(leftType) || get<GenericType>(leftType)) && !isEquality && !isLogical)
{
auto name = getIdentifierOfBaseVar(expr->left);
reportError(CannotInferBinaryOperation{expr->op, name,
@ -1591,10 +1605,10 @@ struct TypeChecker2
TypeId computedType = lookupType(expr->expr);
// Note: As an optimization, we try 'number <: number | string' first, as that is the more likely case.
if (isSubtype(annotationType, computedType, stack.back()))
if (isSubtype(annotationType, computedType, stack.back(), true))
return;
if (isSubtype(computedType, annotationType, stack.back()))
if (isSubtype(computedType, annotationType, stack.back(), true))
return;
reportError(TypesAreUnrelated{computedType, annotationType}, expr->location);
@ -1679,6 +1693,10 @@ struct TypeChecker2
void visit(AstType* ty)
{
TypeId* resolvedTy = module->astResolvedTypes.find(ty);
if (resolvedTy)
checkForFamilyInhabitance(follow(*resolvedTy), ty->location);
if (auto t = ty->as<AstTypeReference>())
return visit(t);
else if (auto t = ty->as<AstTypeTable>())
@ -1989,11 +2007,12 @@ struct TypeChecker2
}
template<typename TID>
bool isSubtype(TID subTy, TID superTy, NotNull<Scope> scope)
bool isSubtype(TID subTy, TID superTy, NotNull<Scope> scope, bool genericsOkay = false)
{
TypeArena arena;
Unifier u{NotNull{&normalizer}, Mode::Strict, scope, Location{}, Covariant};
u.useScopes = true;
u.hideousFixMeGenericsAreActuallyFree = genericsOkay;
u.enableScopeTests();
u.tryUnify(subTy, superTy);
const bool ok = u.errors.empty() && u.log.empty();
@ -2001,11 +2020,13 @@ struct TypeChecker2
}
template<typename TID>
ErrorVec tryUnify(NotNull<Scope> scope, const Location& location, TID subTy, TID superTy, CountMismatch::Context context = CountMismatch::Arg)
ErrorVec tryUnify(NotNull<Scope> scope, const Location& location, TID subTy, TID superTy, CountMismatch::Context context = CountMismatch::Arg,
bool genericsOkay = false)
{
Unifier u{NotNull{&normalizer}, Mode::Strict, scope, location, Covariant};
u.ctx = context;
u.useScopes = true;
u.hideousFixMeGenericsAreActuallyFree = genericsOkay;
u.enableScopeTests();
u.tryUnify(subTy, superTy);
return std::move(u.errors);

Analysis/src/TypeFamily.cpp (new file, 310 lines)

@ -0,0 +1,310 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/TypeFamily.h"
#include "Luau/DenseHash.h"
#include "Luau/VisitType.h"
#include "Luau/TxnLog.h"
#include "Luau/Substitution.h"
#include "Luau/ToString.h"
LUAU_DYNAMIC_FASTINTVARIABLE(LuauTypeFamilyGraphReductionMaximumSteps, 1'000'000);
namespace Luau
{
struct InstanceCollector : TypeOnceVisitor
{
std::deque<TypeId> tys;
std::deque<TypePackId> tps;
bool visit(TypeId ty, const TypeFamilyInstanceType&) override
{
// TypeOnceVisitor performs a depth-first traversal in the absence of
// cycles. This means that by pushing to the front of the queue, we will
// try to reduce deeper instances first if we start with the first thing
// in the queue. Consider Add<Add<Add<number, number>, number>, number>:
// we want to reduce the innermost Add<number, number> instantiation
// first.
tys.push_front(ty);
return true;
}
bool visit(TypePackId tp, const TypeFamilyInstanceTypePack&) override
{
// TypeOnceVisitor performs a depth-first traversal in the absence of
// cycles. This means that by pushing to the front of the queue, we will
// try to reduce deeper instances first if we start with the first thing
// in the queue. Consider Add<Add<Add<number, number>, number>, number>:
// we want to reduce the innermost Add<number, number> instantiation
// first.
tps.push_front(tp);
return true;
}
};
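To make the ordering argument in the comments above concrete, here is a small standalone C++ sketch (illustrative only; the names are not from the Luau sources): pushing each visited instance to the front of a deque and later taking work from the front yields innermost-first reduction.

#include <cassert>
#include <deque>
#include <string>

int main()
{
    // A depth-first walk of Add<Add<Add<number, number>, number>, number>
    // visits the outermost instance first and the innermost instance last.
    std::deque<std::string> queue;
    for (const char* visited : {"outer", "middle", "inner"})
        queue.push_front(visited);

    // Taking work from the front therefore reduces the innermost instance first.
    assert(queue.front() == "inner");
    return 0;
}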
struct FamilyReducer
{
std::deque<TypeId> queuedTys;
std::deque<TypePackId> queuedTps;
DenseHashSet<const void*> irreducible{nullptr};
FamilyGraphReductionResult result;
Location location;
NotNull<TypeArena> arena;
NotNull<BuiltinTypes> builtins;
TxnLog* log = nullptr;
NotNull<const TxnLog> reducerLog;
bool force = false;
FamilyReducer(std::deque<TypeId> queuedTys, std::deque<TypePackId> queuedTps, Location location, NotNull<TypeArena> arena,
NotNull<BuiltinTypes> builtins, TxnLog* log = nullptr, bool force = false)
: queuedTys(std::move(queuedTys))
, queuedTps(std::move(queuedTps))
, location(location)
, arena(arena)
, builtins(builtins)
, log(log)
, reducerLog(NotNull{log ? log : TxnLog::empty()})
, force(force)
{
}
enum class SkipTestResult
{
Irreducible,
Defer,
Okay,
};
SkipTestResult testForSkippability(TypeId ty)
{
ty = reducerLog->follow(ty);
if (reducerLog->is<TypeFamilyInstanceType>(ty))
{
if (!irreducible.contains(ty))
return SkipTestResult::Defer;
else
return SkipTestResult::Irreducible;
}
else if (reducerLog->is<GenericType>(ty))
{
return SkipTestResult::Irreducible;
}
return SkipTestResult::Okay;
}
SkipTestResult testForSkippability(TypePackId ty)
{
ty = reducerLog->follow(ty);
if (reducerLog->is<TypeFamilyInstanceTypePack>(ty))
{
if (!irreducible.contains(ty))
return SkipTestResult::Defer;
else
return SkipTestResult::Irreducible;
}
else if (reducerLog->is<GenericTypePack>(ty))
{
return SkipTestResult::Irreducible;
}
return SkipTestResult::Okay;
}
template<typename T>
void replace(T subject, T replacement)
{
if (log)
log->replace(subject, Unifiable::Bound{replacement});
else
asMutable(subject)->ty.template emplace<Unifiable::Bound<T>>(replacement);
if constexpr (std::is_same_v<T, TypeId>)
result.reducedTypes.insert(subject);
else if constexpr (std::is_same_v<T, TypePackId>)
result.reducedPacks.insert(subject);
}
template<typename T>
void handleFamilyReduction(T subject, TypeFamilyReductionResult<T> reduction)
{
if (reduction.result)
replace(subject, *reduction.result);
else
{
irreducible.insert(subject);
if (reduction.uninhabited || force)
{
if constexpr (std::is_same_v<T, TypeId>)
result.errors.push_back(TypeError{location, UninhabitedTypeFamily{subject}});
else if constexpr (std::is_same_v<T, TypePackId>)
result.errors.push_back(TypeError{location, UninhabitedTypePackFamily{subject}});
}
else if (!reduction.uninhabited && !force)
{
for (TypeId b : reduction.blockedTypes)
result.blockedTypes.insert(b);
for (TypePackId b : reduction.blockedPacks)
result.blockedPacks.insert(b);
}
}
}
bool done()
{
return queuedTys.empty() && queuedTps.empty();
}
template<typename T, typename I>
bool testParameters(T subject, const I* tfit)
{
for (TypeId p : tfit->typeArguments)
{
SkipTestResult skip = testForSkippability(p);
if (skip == SkipTestResult::Irreducible)
{
irreducible.insert(subject);
return false;
}
else if (skip == SkipTestResult::Defer)
{
if constexpr (std::is_same_v<T, TypeId>)
queuedTys.push_back(subject);
else if constexpr (std::is_same_v<T, TypePackId>)
queuedTps.push_back(subject);
return false;
}
}
for (TypePackId p : tfit->packArguments)
{
SkipTestResult skip = testForSkippability(p);
if (skip == SkipTestResult::Irreducible)
{
irreducible.insert(subject);
return false;
}
else if (skip == SkipTestResult::Defer)
{
if constexpr (std::is_same_v<T, TypeId>)
queuedTys.push_back(subject);
else if constexpr (std::is_same_v<T, TypePackId>)
queuedTps.push_back(subject);
return false;
}
}
return true;
}
void stepType()
{
TypeId subject = reducerLog->follow(queuedTys.front());
queuedTys.pop_front();
if (irreducible.contains(subject))
return;
if (const TypeFamilyInstanceType* tfit = reducerLog->get<TypeFamilyInstanceType>(subject))
{
if (!testParameters(subject, tfit))
return;
TypeFamilyReductionResult<TypeId> result = tfit->family->reducer(tfit->typeArguments, tfit->packArguments, arena, builtins, reducerLog);
handleFamilyReduction(subject, result);
}
}
void stepPack()
{
TypePackId subject = reducerLog->follow(queuedTps.front());
queuedTps.pop_front();
if (irreducible.contains(subject))
return;
if (const TypeFamilyInstanceTypePack* tfit = reducerLog->get<TypeFamilyInstanceTypePack>(subject))
{
if (!testParameters(subject, tfit))
return;
TypeFamilyReductionResult<TypePackId> result =
tfit->family->reducer(tfit->typeArguments, tfit->packArguments, arena, builtins, reducerLog);
handleFamilyReduction(subject, result);
}
}
void step()
{
if (!queuedTys.empty())
stepType();
else if (!queuedTps.empty())
stepPack();
}
};
static FamilyGraphReductionResult reduceFamiliesInternal(std::deque<TypeId> queuedTys, std::deque<TypePackId> queuedTps, Location location,
NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log, bool force)
{
FamilyReducer reducer{std::move(queuedTys), std::move(queuedTps), location, arena, builtins, log, force};
int iterationCount = 0;
while (!reducer.done())
{
reducer.step();
++iterationCount;
if (iterationCount > DFInt::LuauTypeFamilyGraphReductionMaximumSteps)
{
reducer.result.errors.push_back(TypeError{location, CodeTooComplex{}});
break;
}
}
return std::move(reducer.result);
}
FamilyGraphReductionResult reduceFamilies(
TypeId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log, bool force)
{
InstanceCollector collector;
try
{
collector.traverse(entrypoint);
}
catch (RecursionLimitException&)
{
return FamilyGraphReductionResult{};
}
return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, log, force);
}
FamilyGraphReductionResult reduceFamilies(
TypePackId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log, bool force)
{
InstanceCollector collector;
try
{
collector.traverse(entrypoint);
}
catch (RecursionLimitException&)
{
return FamilyGraphReductionResult{};
}
return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, log, force);
}
} // namespace Luau


@ -41,6 +41,7 @@ LUAU_FASTFLAG(LuauOccursIsntAlwaysFailure)
LUAU_FASTFLAGVARIABLE(LuauTypecheckTypeguards, false)
LUAU_FASTFLAGVARIABLE(LuauTinyControlFlowAnalysis, false)
LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
LUAU_FASTFLAGVARIABLE(LuauTypecheckClassTypeIndexers, false)
namespace Luau
{
@ -2104,6 +2105,23 @@ std::optional<TypeId> TypeChecker::getIndexTypeFromTypeImpl(
const Property* prop = lookupClassProp(cls, name);
if (prop)
return prop->type();
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (auto indexer = cls->indexer)
{
// TODO: Property lookup should work with string singletons or unions thereof as the indexer key type.
ErrorVec errors = tryUnify(stringType, indexer->indexType, scope, location);
if (errors.empty())
return indexer->indexResultType;
if (addErrors)
reportError(location, UnknownProperty{type, name});
return std::nullopt;
}
}
}
else if (const UnionType* utv = get<UnionType>(type))
{
@ -3295,14 +3313,38 @@ TypeId TypeChecker::checkLValueBinding(const ScopePtr& scope, const AstExprIndex
}
else if (const ClassType* lhsClass = get<ClassType>(lhs))
{
const Property* prop = lookupClassProp(lhsClass, name);
if (!prop)
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (const Property* prop = lookupClassProp(lhsClass, name))
{
return prop->type();
}
if (auto indexer = lhsClass->indexer)
{
Unifier state = mkUnifier(scope, expr.location);
state.tryUnify(stringType, indexer->indexType);
if (state.errors.empty())
{
state.log.commit();
return indexer->indexResultType;
}
}
reportError(TypeError{expr.location, UnknownProperty{lhs, name}});
return errorRecoveryType(scope);
}
else
{
const Property* prop = lookupClassProp(lhsClass, name);
if (!prop)
{
reportError(TypeError{expr.location, UnknownProperty{lhs, name}});
return errorRecoveryType(scope);
}
return prop->type();
return prop->type();
}
}
else if (get<IntersectionType>(lhs))
{
@ -3344,23 +3386,57 @@ TypeId TypeChecker::checkLValueBinding(const ScopePtr& scope, const AstExprIndex
{
if (const ClassType* exprClass = get<ClassType>(exprType))
{
const Property* prop = lookupClassProp(exprClass, value->value.data);
if (!prop)
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (const Property* prop = lookupClassProp(exprClass, value->value.data))
{
return prop->type();
}
if (auto indexer = exprClass->indexer)
{
unify(stringType, indexer->indexType, scope, expr.index->location);
return indexer->indexResultType;
}
reportError(TypeError{expr.location, UnknownProperty{exprType, value->value.data}});
return errorRecoveryType(scope);
}
return prop->type();
else
{
const Property* prop = lookupClassProp(exprClass, value->value.data);
if (!prop)
{
reportError(TypeError{expr.location, UnknownProperty{exprType, value->value.data}});
return errorRecoveryType(scope);
}
return prop->type();
}
}
}
else if (FFlag::LuauAllowIndexClassParameters)
else
{
if (const ClassType* exprClass = get<ClassType>(exprType))
if (FFlag::LuauTypecheckClassTypeIndexers)
{
if (isNonstrictMode())
return unknownType;
reportError(TypeError{expr.location, DynamicPropertyLookupOnClassesUnsafe{exprType}});
return errorRecoveryType(scope);
if (const ClassType* exprClass = get<ClassType>(exprType))
{
if (auto indexer = exprClass->indexer)
{
unify(indexType, indexer->indexType, scope, expr.index->location);
return indexer->indexResultType;
}
}
}
if (FFlag::LuauAllowIndexClassParameters)
{
if (const ClassType* exprClass = get<ClassType>(exprType))
{
if (isNonstrictMode())
return unknownType;
reportError(TypeError{expr.location, DynamicPropertyLookupOnClassesUnsafe{exprType}});
return errorRecoveryType(scope);
}
}
}


@ -12,6 +12,7 @@
#include "Luau/TypeUtils.h"
#include "Luau/Type.h"
#include "Luau/VisitType.h"
#include "Luau/TypeFamily.h"
#include <algorithm>
@ -20,6 +21,7 @@ LUAU_FASTFLAG(LuauErrorRecoveryType)
LUAU_FASTFLAGVARIABLE(LuauInstantiateInSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauUninhabitedSubAnything2, false)
LUAU_FASTFLAGVARIABLE(LuauVariadicAnyCanBeGeneric, false)
LUAU_FASTFLAGVARIABLE(LuauUnifyTwoOptions, false)
LUAU_FASTFLAGVARIABLE(LuauMaintainScopesInUnifier, false)
LUAU_FASTFLAGVARIABLE(LuauTransitiveSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauOccursIsntAlwaysFailure, false)
@ -439,6 +441,30 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
if (superTy == subTy)
return;
if (log.get<TypeFamilyInstanceType>(superTy))
{
// We do not report errors from reducing here. This is because we will
// "double-report" errors in some cases, like when trying to unify
// identical type family instantiations like Add<false, false> with
// Add<false, false>.
reduceFamilies(superTy, location, NotNull(types), builtinTypes, &log);
superTy = log.follow(superTy);
}
if (log.get<TypeFamilyInstanceType>(subTy))
{
reduceFamilies(subTy, location, NotNull(types), builtinTypes, &log);
subTy = log.follow(subTy);
}
// If we can't reduce the families down and we still have type family types
// here, we are stuck. Nothing meaningful can be done here. We don't wish to
// report an error, either.
if (log.get<TypeFamilyInstanceType>(superTy) || log.get<TypeFamilyInstanceType>(subTy))
{
return;
}
auto superFree = log.getMutable<FreeType>(superTy);
auto subFree = log.getMutable<FreeType>(subTy);
@ -509,6 +535,49 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
return;
}
if (hideousFixMeGenericsAreActuallyFree)
{
auto superGeneric = log.getMutable<GenericType>(superTy);
auto subGeneric = log.getMutable<GenericType>(subTy);
if (superGeneric && subGeneric && subsumes(useScopes, superGeneric, subGeneric))
{
if (!occursCheck(subTy, superTy, /* reversed = */ false))
log.replace(subTy, BoundType(superTy));
return;
}
else if (superGeneric && subGeneric)
{
if (!occursCheck(superTy, subTy, /* reversed = */ true))
log.replace(superTy, BoundType(subTy));
return;
}
else if (superGeneric)
{
if (!occursCheck(superTy, subTy, /* reversed = */ true))
{
Widen widen{types, builtinTypes};
log.replace(superTy, BoundType(widen(subTy)));
}
return;
}
else if (subGeneric)
{
// Normally, if the subtype is free, it should not be bound to any, unknown, or error types.
// But for bug compatibility, we'll only apply this rule to unknown. Doing this will silence cascading type errors.
if (log.get<UnknownType>(superTy))
return;
if (!occursCheck(subTy, superTy, /* reversed = */ false))
log.replace(subTy, BoundType(superTy));
return;
}
}
if (log.get<AnyType>(superTy))
return tryUnifyWithAny(subTy, builtinTypes->anyType);
@ -687,8 +756,93 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
log.popSeen(superTy, subTy);
}
/*
* If the passed type is an option, strip nil out.
*
* There is an important subtlety to be observed here:
*
* We want to do a peephole fix to unify the subtype relation A? <: B? by
* peeling off the options and relating A <: B instead, but this only works if
* we are certain that neither A nor B is itself optional.
*
* For instance, if we want to test that (boolean?)? <: boolean?, we must peel
* off both layers of optionality from the subTy.
*
* We must also handle unions that have more than two choices.
*
* eg (string | nil)? <: boolean?
*/
static std::optional<TypeId> unwrapOption(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, const TxnLog& log, TypeId ty, DenseHashSet<TypeId>& seen)
{
if (seen.find(ty))
return std::nullopt;
seen.insert(ty);
const UnionType* ut = get<UnionType>(follow(ty));
if (!ut)
return std::nullopt;
if (2 == ut->options.size())
{
if (isNil(follow(ut->options[0])))
{
std::optional<TypeId> doubleUnwrapped = unwrapOption(builtinTypes, arena, log, ut->options[1], seen);
return doubleUnwrapped.value_or(ut->options[1]);
}
if (isNil(follow(ut->options[1])))
{
std::optional<TypeId> doubleUnwrapped = unwrapOption(builtinTypes, arena, log, ut->options[0], seen);
return doubleUnwrapped.value_or(ut->options[0]);
}
}
std::set<TypeId> newOptions;
bool found = false;
for (TypeId t : ut)
{
t = log.follow(t);
if (isNil(t))
{
found = true;
continue;
}
else
newOptions.insert(t);
}
if (!found)
return std::nullopt;
else if (newOptions.empty())
return builtinTypes->neverType;
else if (1 == newOptions.size())
return *begin(newOptions);
else
return arena->addType(UnionType{std::vector<TypeId>(begin(newOptions), end(newOptions))});
}
static std::optional<TypeId> unwrapOption(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, const TxnLog& log, TypeId ty)
{
DenseHashSet<TypeId> seen{nullptr};
return unwrapOption(builtinTypes, arena, log, ty, seen);
}
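To see the unwrapping rules above in action, here is a standalone C++ sketch that models types as either leaf names or unions (a deliberate simplification: no TxnLog, no arena, no cycle tracking, and the Ty type and names are illustrative rather than Luau's):

#include <cassert>
#include <optional>
#include <string>
#include <vector>

// Toy model: a type is a leaf name ("nil", "boolean", ...) or a union of types.
struct Ty
{
    std::string leaf;
    std::vector<Ty> options;

    bool isUnion() const
    {
        return !options.empty();
    }
};

std::optional<Ty> unwrapOption(const Ty& ty)
{
    if (!ty.isUnion())
        return std::nullopt;

    // Two-option unions with a nil side peel nested options, so (boolean?)?
    // unwraps all the way down to boolean.
    if (ty.options.size() == 2)
    {
        for (int i = 0; i < 2; ++i)
        {
            if (ty.options[i].leaf == "nil")
            {
                const Ty& other = ty.options[1 - i];
                std::optional<Ty> inner = unwrapOption(other);
                return inner ? *inner : other;
            }
        }
    }

    // General case: drop nil from the options and rebuild what remains.
    std::vector<Ty> rest;
    bool foundNil = false;
    for (const Ty& option : ty.options)
    {
        if (option.leaf == "nil")
            foundNil = true;
        else
            rest.push_back(option);
    }

    if (!foundNil)
        return std::nullopt;
    if (rest.empty())
        return Ty{"never", {}};
    if (rest.size() == 1)
        return rest[0];
    return Ty{"", rest};
}

int main()
{
    Ty nil{"nil", {}};
    Ty boolean{"boolean", {}};
    Ty option{"", {boolean, nil}};      // boolean?
    Ty doubleOption{"", {option, nil}}; // (boolean?)?

    std::optional<Ty> unwrapped = unwrapOption(doubleOption);
    assert(unwrapped && unwrapped->leaf == "boolean");
    return 0;
}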
void Unifier::tryUnifyUnionWithType(TypeId subTy, const UnionType* subUnion, TypeId superTy)
{
// Peephole fix: A? <: B? if A <: B
//
// This works around issues that can arise if A or B is free. We do not
// want either of those types to be bound to nil.
if (FFlag::LuauUnifyTwoOptions)
{
if (auto subOption = unwrapOption(builtinTypes, NotNull{types}, log, subTy))
{
if (auto superOption = unwrapOption(builtinTypes, NotNull{types}, log, superTy))
return tryUnify_(*subOption, *superOption);
}
}
// A | B <: T if and only if A <: T and B <: T
bool failed = false;
bool errorsSuppressed = true;
@ -1205,6 +1359,25 @@ void Unifier::tryUnifyNormalizedTypes(
const ClassType* superCtv = get<ClassType>(superClass);
LUAU_ASSERT(superCtv);
if (FFlag::LuauUnifyTwoOptions)
{
if (variance == Invariant)
{
if (subCtv == superCtv)
{
found = true;
/*
* The only way we could care about superNegations is if
* one of them was equal to superCtv. However,
* normalization ensures that this is impossible.
*/
}
else
continue;
}
}
if (isSubclass(subCtv, superCtv))
{
found = true;
@ -1518,6 +1691,12 @@ struct WeirdIter
}
};
void Unifier::enableScopeTests()
{
useScopes = true;
log.useScopes = true;
}
ErrorVec Unifier::canUnify(TypeId subTy, TypeId superTy)
{
Unifier s = makeChildUnifier();
@ -1597,6 +1776,21 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
log.replace(subTp, Unifiable::Bound<TypePackId>(superTp));
}
}
else if (hideousFixMeGenericsAreActuallyFree && log.getMutable<GenericTypePack>(superTp))
{
if (!occursCheck(superTp, subTp, /* reversed = */ true))
{
Widen widen{types, builtinTypes};
log.replace(superTp, Unifiable::Bound<TypePackId>(widen(subTp)));
}
}
else if (hideousFixMeGenericsAreActuallyFree && log.getMutable<GenericTypePack>(subTp))
{
if (!occursCheck(subTp, superTp, /* reversed = */ false))
{
log.replace(subTp, Unifiable::Bound<TypePackId>(superTp));
}
}
else if (log.getMutable<Unifiable::Error>(superTp))
tryUnifyWithAny(subTp, superTp);
else if (log.getMutable<Unifiable::Error>(subTp))
@ -2611,7 +2805,10 @@ void Unifier::tryUnifyVariadics(TypePackId subTp, TypePackId superTp, bool rever
}
else if (get<GenericTypePack>(tail))
{
reportError(location, GenericError{"Cannot unify variadic and generic packs"});
if (!hideousFixMeGenericsAreActuallyFree)
reportError(location, GenericError{"Cannot unify variadic and generic packs"});
else
log.replace(tail, BoundTypePack{superTp});
}
else if (get<Unifiable::Error>(tail))
{
@ -2732,7 +2929,7 @@ std::optional<TypeId> Unifier::findTablePropertyRespectingMeta(TypeId lhsType, N
TxnLog Unifier::combineLogsIntoIntersection(std::vector<TxnLog> logs)
{
LUAU_ASSERT(FFlag::DebugLuauDeferredConstraintResolution);
TxnLog result;
TxnLog result(useScopes);
for (TxnLog& log : logs)
result.concatAsIntersections(std::move(log), NotNull{types});
return result;
@ -2741,7 +2938,7 @@ TxnLog Unifier::combineLogsIntoIntersection(std::vector<TxnLog> logs)
TxnLog Unifier::combineLogsIntoUnion(std::vector<TxnLog> logs)
{
LUAU_ASSERT(FFlag::DebugLuauDeferredConstraintResolution);
TxnLog result;
TxnLog result(useScopes);
for (TxnLog& log : logs)
result.concatAsUnion(std::move(log), NotNull{types});
return result;
@ -2807,7 +3004,7 @@ bool Unifier::occursCheck(DenseHashSet<TypeId>& seen, TypeId needle, TypeId hays
if (log.getMutable<ErrorType>(needle))
return false;
if (!log.getMutable<FreeType>(needle))
if (!log.getMutable<FreeType>(needle) && !(hideousFixMeGenericsAreActuallyFree && log.is<GenericType>(needle)))
ice("Expected needle to be free");
if (needle == haystack)
@ -2821,7 +3018,7 @@ bool Unifier::occursCheck(DenseHashSet<TypeId>& seen, TypeId needle, TypeId hays
return true;
}
if (log.getMutable<FreeType>(haystack))
if (log.getMutable<FreeType>(haystack) || (hideousFixMeGenericsAreActuallyFree && log.is<GenericType>(haystack)))
return false;
else if (auto a = log.getMutable<UnionType>(haystack))
{
@ -2865,7 +3062,7 @@ bool Unifier::occursCheck(DenseHashSet<TypePackId>& seen, TypePackId needle, Typ
if (log.getMutable<ErrorTypePack>(needle))
return false;
if (!log.getMutable<FreeTypePack>(needle))
if (!log.getMutable<FreeTypePack>(needle) && !(hideousFixMeGenericsAreActuallyFree && log.is<GenericTypePack>(needle)))
ice("Expected needle pack to be free");
RecursionLimiter _ra(&sharedState.counters.recursionCount, sharedState.counters.recursionLimit);
@ -2900,7 +3097,10 @@ Unifier Unifier::makeChildUnifier()
Unifier u = Unifier{normalizer, mode, scope, location, variance, &log};
u.normalize = normalize;
u.checkInhabited = checkInhabited;
u.useScopes = useScopes;
if (useScopes)
u.enableScopeTests();
return u;
}


@ -27,6 +27,10 @@
#include <windows.h>
#endif
#ifdef __linux__
#include <unistd.h>
#endif
#ifdef CALLGRIND
#include <valgrind/callgrind.h>
#endif
@ -865,6 +869,7 @@ int replMain(int argc, char** argv)
int profile = 0;
bool coverage = false;
bool interactive = false;
bool codegenPerf = false;
// Set the mode if the user has explicitly specified one.
int argStart = 1;
@ -962,6 +967,11 @@ int replMain(int argc, char** argv)
{
codegen = true;
}
else if (strcmp(argv[i], "--codegen-perf") == 0)
{
codegen = true;
codegenPerf = true;
}
else if (strcmp(argv[i], "--coverage") == 0)
{
coverage = true;
@ -998,6 +1008,24 @@ int replMain(int argc, char** argv)
}
#endif
if (codegenPerf)
{
#if __linux__
char path[128];
snprintf(path, sizeof(path), "/tmp/perf-%d.map", getpid());
// Note: there's no need to close the log explicitly, as it will be closed when the process exits
FILE* codegenPerfLog = fopen(path, "w");
Luau::CodeGen::setPerfLog(codegenPerfLog, [](void* context, uintptr_t addr, unsigned size, const char* symbol) {
fprintf(static_cast<FILE*>(context), "%016lx %08x %s\n", long(addr), size, symbol);
});
#else
fprintf(stderr, "--codegen-perf option is only supported on Linux\n");
return 1;
#endif
}
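On Linux, perf resolves JIT symbols from /tmp/perf-<pid>.map, one "<start> <size> <name>" line per symbol, which is the format the callback above writes for every compiled Luau function. A standalone sketch of producing one such line (the address, size, and symbol text here are made up for illustration):

#include <cstdint>
#include <cstdio>

int main()
{
    uintptr_t addr = 0x7f0000001000; // hypothetical code address
    unsigned size = 0x40;            // hypothetical code size

    // Mirrors the "start size name" layout emitted by the --codegen-perf callback.
    printf("%016llx %08x %s\n", (unsigned long long)addr, size, "<luau> example.lua:1 foo");
    return 0;
}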
const std::vector<std::string> files = getSourceFiles(argc, argv);
if (mode == CliMode::Unknown)
{


@ -56,7 +56,7 @@ public:
void eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void bic(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void tst(RegisterA64 src1, RegisterA64 src2, int shift = 0);
void mvn(RegisterA64 dst, RegisterA64 src);
void mvn_(RegisterA64 dst, RegisterA64 src);
// Bitwise with immediate
// Note: immediate must have a single contiguous sequence of 1 bits set of length 1..31
@ -199,7 +199,7 @@ private:
void placeR1(const char* name, RegisterA64 dst, RegisterA64 src, uint32_t op);
void placeI12(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op);
void placeI16(const char* name, RegisterA64 dst, int src, uint8_t op, int shift = 0);
void placeA(const char* name, RegisterA64 dst, AddressA64 src, uint8_t op, uint8_t size, int sizelog);
void placeA(const char* name, RegisterA64 dst, AddressA64 src, uint16_t opsize, int sizelog);
void placeB(const char* name, Label& label, uint8_t op);
void placeBC(const char* name, Label& label, uint8_t op, uint8_t cond);
void placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond);


@ -2,6 +2,7 @@
#pragma once
#include "Luau/Common.h"
#include "Luau/DenseHash.h"
#include "Luau/Label.h"
#include "Luau/ConditionX64.h"
#include "Luau/OperandX64.h"
@ -250,6 +251,8 @@ private:
std::vector<Label> pendingLabels;
std::vector<uint32_t> labelLocations;
DenseHashMap<uint64_t, int32_t> constCache64;
bool finalized = false;
size_t dataPos = 0;


@ -13,5 +13,7 @@ namespace CodeGen
void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, size_t& startOffset);
void destroyBlockUnwindInfo(void* context, void* unwindData);
bool isUnwindSupported();
} // namespace CodeGen
} // namespace Luau


@ -3,6 +3,8 @@
#include <string>
#include <stdint.h>
struct lua_State;
namespace Luau
@ -17,7 +19,7 @@ void create(lua_State* L);
// Builds target function and all inner functions
void compile(lua_State* L, int idx);
using annotatorFn = void (*)(void* context, std::string& result, int fid, int instpos);
using AnnotatorFn = void (*)(void* context, std::string& result, int fid, int instpos);
struct AssemblyOptions
{
@ -28,12 +30,16 @@ struct AssemblyOptions
bool includeOutlinedCode = false;
// Optional annotator function can be provided to describe each instruction, it takes function id and sequential instruction id
annotatorFn annotator = nullptr;
AnnotatorFn annotator = nullptr;
void* annotatorContext = nullptr;
};
// Generates assembly for target function and all inner functions
std::string getAssembly(lua_State* L, int idx, AssemblyOptions options = {});
using PerfLogFn = void (*)(void* context, uintptr_t addr, unsigned size, const char* symbol);
void setPerfLog(void* context, PerfLogFn logFn);
} // namespace CodeGen
} // namespace Luau


@ -184,7 +184,7 @@ void AssemblyBuilderA64::tst(RegisterA64 src1, RegisterA64 src2, int shift)
placeSR3("tst", dst, src1, src2, 0b11'01010, shift);
}
void AssemblyBuilderA64::mvn(RegisterA64 dst, RegisterA64 src)
void AssemblyBuilderA64::mvn_(RegisterA64 dst, RegisterA64 src)
{
placeSR2("mvn", dst, src, 0b01'01010, 0b1);
}
@ -287,19 +287,19 @@ void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
switch (dst.kind)
{
case KindA64::w:
placeA("ldr", dst, src, 0b11100001, 0b10, /* sizelog= */ 2);
placeA("ldr", dst, src, 0b10'11100001, /* sizelog= */ 2);
break;
case KindA64::x:
placeA("ldr", dst, src, 0b11100001, 0b11, /* sizelog= */ 3);
placeA("ldr", dst, src, 0b11'11100001, /* sizelog= */ 3);
break;
case KindA64::s:
placeA("ldr", dst, src, 0b11110001, 0b10, /* sizelog= */ 2);
placeA("ldr", dst, src, 0b10'11110001, /* sizelog= */ 2);
break;
case KindA64::d:
placeA("ldr", dst, src, 0b11110001, 0b11, /* sizelog= */ 3);
placeA("ldr", dst, src, 0b11'11110001, /* sizelog= */ 3);
break;
case KindA64::q:
placeA("ldr", dst, src, 0b11110011, 0b00, /* sizelog= */ 4);
placeA("ldr", dst, src, 0b00'11110011, /* sizelog= */ 4);
break;
case KindA64::none:
LUAU_ASSERT(!"Unexpected register kind");
@ -310,35 +310,35 @@ void AssemblyBuilderA64::ldrb(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::w);
placeA("ldrb", dst, src, 0b11100001, 0b00, /* sizelog= */ 0);
placeA("ldrb", dst, src, 0b00'11100001, /* sizelog= */ 0);
}
void AssemblyBuilderA64::ldrh(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::w);
placeA("ldrh", dst, src, 0b11100001, 0b01, /* sizelog= */ 1);
placeA("ldrh", dst, src, 0b01'11100001, /* sizelog= */ 1);
}
void AssemblyBuilderA64::ldrsb(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
placeA("ldrsb", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b00, /* sizelog= */ 0);
placeA("ldrsb", dst, src, 0b00'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 0);
}
void AssemblyBuilderA64::ldrsh(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
placeA("ldrsh", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b01, /* sizelog= */ 1);
placeA("ldrsh", dst, src, 0b01'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 1);
}
void AssemblyBuilderA64::ldrsw(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x);
placeA("ldrsw", dst, src, 0b11100010, 0b10, /* sizelog= */ 2);
placeA("ldrsw", dst, src, 0b10'11100010, /* sizelog= */ 2);
}
void AssemblyBuilderA64::ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src)
@ -356,19 +356,19 @@ void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
switch (src.kind)
{
case KindA64::w:
placeA("str", src, dst, 0b11100000, 0b10, /* sizelog= */ 2);
placeA("str", src, dst, 0b10'11100000, /* sizelog= */ 2);
break;
case KindA64::x:
placeA("str", src, dst, 0b11100000, 0b11, /* sizelog= */ 3);
placeA("str", src, dst, 0b11'11100000, /* sizelog= */ 3);
break;
case KindA64::s:
placeA("str", src, dst, 0b11110000, 0b10, /* sizelog= */ 2);
placeA("str", src, dst, 0b10'11110000, /* sizelog= */ 2);
break;
case KindA64::d:
placeA("str", src, dst, 0b11110000, 0b11, /* sizelog= */ 3);
placeA("str", src, dst, 0b11'11110000, /* sizelog= */ 3);
break;
case KindA64::q:
placeA("str", src, dst, 0b11110010, 0b00, /* sizelog= */ 4);
placeA("str", src, dst, 0b00'11110010, /* sizelog= */ 4);
break;
case KindA64::none:
LUAU_ASSERT(!"Unexpected register kind");
@ -379,14 +379,14 @@ void AssemblyBuilderA64::strb(RegisterA64 src, AddressA64 dst)
{
LUAU_ASSERT(src.kind == KindA64::w);
placeA("strb", src, dst, 0b11100000, 0b00, /* sizelog= */ 0);
placeA("strb", src, dst, 0b00'11100000, /* sizelog= */ 0);
}
void AssemblyBuilderA64::strh(RegisterA64 src, AddressA64 dst)
{
LUAU_ASSERT(src.kind == KindA64::w);
placeA("strh", src, dst, 0b11100000, 0b01, /* sizelog= */ 1);
placeA("strh", src, dst, 0b01'11100000, /* sizelog= */ 1);
}
void AssemblyBuilderA64::stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst)
@ -487,9 +487,12 @@ void AssemblyBuilderA64::adr(RegisterA64 dst, Label& label)
void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
LUAU_ASSERT(dst.kind == KindA64::d && (src.kind == KindA64::d || src.kind == KindA64::x));
placeR1("fmov", dst, src, 0b000'11110'01'1'0000'00'10000);
if (src.kind == KindA64::d)
placeR1("fmov", dst, src, 0b000'11110'01'1'0000'00'10000);
else
placeR1("fmov", dst, src, 0b000'11110'01'1'00'111'000000);
}
void AssemblyBuilderA64::fmov(RegisterA64 dst, double src)
@ -825,7 +828,7 @@ void AssemblyBuilderA64::placeI16(const char* name, RegisterA64 dst, int src, ui
commit();
}
void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 src, uint8_t op, uint8_t size, int sizelog)
void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 src, uint16_t opsize, int sizelog)
{
if (logText)
log(name, dst, src);
@ -833,15 +836,15 @@ void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 sr
switch (src.kind)
{
case AddressKindA64::imm:
if (src.data >= 0 && (src.data >> sizelog) < 1024 && (src.data & ((1 << sizelog) - 1)) == 0)
place(dst.index | (src.base.index << 5) | ((src.data >> sizelog) << 10) | (op << 22) | (1 << 24) | (size << 30));
if (unsigned(src.data >> sizelog) < 1024 && (src.data & ((1 << sizelog) - 1)) == 0)
place(dst.index | (src.base.index << 5) | ((src.data >> sizelog) << 10) | (opsize << 22) | (1 << 24));
else if (src.data >= -256 && src.data <= 255)
place(dst.index | (src.base.index << 5) | ((src.data & ((1 << 9) - 1)) << 12) | (op << 22) | (size << 30));
place(dst.index | (src.base.index << 5) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22));
else
LUAU_ASSERT(!"Unable to encode large immediate offset");
break;
case AddressKindA64::reg:
place(dst.index | (src.base.index << 5) | (0b10 << 10) | (0b011 << 13) | (src.offset.index << 16) | (1 << 21) | (op << 22) | (size << 30));
place(dst.index | (src.base.index << 5) | (0b011'0'10 << 10) | (src.offset.index << 16) | (1 << 21) | (opsize << 22));
break;
}
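The single opsize argument simply folds the old op and size pair together as (size << 8) | op, so (opsize << 22) reproduces the previous (op << 22) | (size << 30) bits. A standalone sanity check in C++ (illustrative, not part of the builder):

#include <cstdint>

constexpr uint32_t oldBits(uint8_t op, uint8_t size)
{
    return (uint32_t(op) << 22) | (uint32_t(size) << 30);
}

constexpr uint32_t newBits(uint16_t opsize)
{
    return uint32_t(opsize) << 22;
}

// "ldr w" used op=0b11100001, size=0b10; the combined constant is 0b10'11100001.
static_assert(oldBits(0b11100001, 0b10) == newBits(0b10'11100001), "ldr w bits unchanged");
// "str d" used op=0b11110000, size=0b11; the combined constant is 0b11'11110000.
static_assert(oldBits(0b11110000, 0b11) == newBits(0b11'11110000), "str d bits unchanged");

int main()
{
    return 0;
}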


@ -75,6 +75,7 @@ static ABIX64 getCurrentX64ABI()
AssemblyBuilderX64::AssemblyBuilderX64(bool logText, ABIX64 abi)
: logText(logText)
, abi(abi)
, constCache64(~0ull)
{
data.resize(4096);
dataPos = data.size(); // data is filled backwards
@ -885,9 +886,22 @@ void AssemblyBuilderX64::setLabel(Label& label)
OperandX64 AssemblyBuilderX64::i64(int64_t value)
{
uint64_t as64BitKey = value;
if (as64BitKey != ~0ull)
{
if (int32_t* prev = constCache64.find(as64BitKey))
return OperandX64(SizeX64::qword, noreg, 1, rip, *prev);
}
size_t pos = allocateData(8, 8);
writeu64(&data[pos], value);
return OperandX64(SizeX64::qword, noreg, 1, rip, int32_t(pos - data.size()));
int32_t offset = int32_t(pos - data.size());
if (as64BitKey != ~0ull)
constCache64[as64BitKey] = offset;
return OperandX64(SizeX64::qword, noreg, 1, rip, offset);
}
OperandX64 AssemblyBuilderX64::f32(float value)
@ -899,9 +913,24 @@ OperandX64 AssemblyBuilderX64::f32(float value)
OperandX64 AssemblyBuilderX64::f64(double value)
{
uint64_t as64BitKey;
static_assert(sizeof(as64BitKey) == sizeof(value), "Expecting double to be 64-bit");
memcpy(&as64BitKey, &value, sizeof(value));
if (as64BitKey != ~0ull)
{
if (int32_t* prev = constCache64.find(as64BitKey))
return OperandX64(SizeX64::qword, noreg, 1, rip, *prev);
}
size_t pos = allocateData(8, 8);
writef64(&data[pos], value);
return OperandX64(SizeX64::qword, noreg, 1, rip, int32_t(pos - data.size()));
int32_t offset = int32_t(pos - data.size());
if (as64BitKey != ~0ull)
constCache64[as64BitKey] = offset;
return OperandX64(SizeX64::qword, noreg, 1, rip, offset);
}
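The early-outs on ~0ull are there because that value is the empty key the DenseHashMap was constructed with (see the constructor change above), so it can never be inserted into the cache. A minimal standalone sketch of the dedup idea itself, using std::unordered_map and plain slot indices instead of DenseHashMap and rip-relative operands (names are illustrative):

#include <cstdint>
#include <cstring>
#include <unordered_map>
#include <vector>

struct ConstPool
{
    std::vector<uint64_t> data;
    std::unordered_map<uint64_t, size_t> cache;

    size_t f64(double value)
    {
        uint64_t key;
        memcpy(&key, &value, sizeof(value));

        if (auto it = cache.find(key); it != cache.end())
            return it->second; // constant already emitted: reuse its slot

        size_t offset = data.size();
        data.push_back(key);
        cache[key] = offset;
        return offset;
    }
};

int main()
{
    ConstPool pool;
    size_t a = pool.f64(2.0);
    pool.f64(3.0);
    size_t b = pool.f64(2.0);
    return a == b ? 0 : 1; // 2.0 is stored exactly once
}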
OperandX64 AssemblyBuilderX64::f32x4(float x, float y, float z, float w)


@ -16,7 +16,7 @@
#endif
#include <Windows.h>
#elif !defined(_WIN32)
#elif defined(__linux__) || defined(__APPLE__)
// Defined in unwind.h which may not be easily discoverable on various platforms
extern "C" void __register_frame(const void*);
@ -26,12 +26,16 @@ extern "C" void __unw_add_dynamic_fde() __attribute__((weak));
#endif
#if defined(__APPLE__) && defined(__aarch64__)
#include <sys/sysctl.h>
#endif
namespace Luau
{
namespace CodeGen
{
#if !defined(_WIN32)
#if defined(__linux__) || defined(__APPLE__)
static void visitFdeEntries(char* pos, void (*cb)(const void*))
{
// When using glibc++ unwinder, we need to call __register_frame/__deregister_frame on the entire .eh_frame data
@ -78,7 +82,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
LUAU_ASSERT(!"failed to allocate function table");
return nullptr;
}
#elif !defined(_WIN32)
#elif defined(__linux__) || defined(__APPLE__)
visitFdeEntries(unwindData, __register_frame);
#endif
@ -91,10 +95,26 @@ void destroyBlockUnwindInfo(void* context, void* unwindData)
#if defined(_WIN32) && defined(_M_X64)
if (!RtlDeleteFunctionTable((RUNTIME_FUNCTION*)unwindData))
LUAU_ASSERT(!"failed to deallocate function table");
#elif !defined(_WIN32)
#elif defined(__linux__) || defined(__APPLE__)
visitFdeEntries((char*)unwindData, __deregister_frame);
#endif
}
bool isUnwindSupported()
{
#if defined(_WIN32) && defined(_M_X64)
return true;
#elif defined(__APPLE__) && defined(__aarch64__)
char ver[256];
size_t verLength = sizeof(ver);
// libunwind on macOS 12 and earlier (which maps to osrelease 21) assumes JIT frames use pointer authentication without a way to override that
return sysctlbyname("kern.osrelease", ver, &verLength, NULL, 0) == 0 && atoi(ver) >= 22;
#elif defined(__linux__) || defined(__APPLE__)
return true;
#else
return false;
#endif
}
} // namespace CodeGen
} // namespace Luau


@ -58,6 +58,9 @@ namespace Luau
namespace CodeGen
{
static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;
static NativeProto* createNativeProto(Proto* proto, const IrBuilder& ir)
{
int sizecode = proto->sizecode;
@ -87,6 +90,20 @@ static void destroyNativeProto(NativeProto* nativeProto)
::operator delete(memory);
}
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
LUAU_ASSERT(p->source);
const char* source = getstr(p->source);
source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";
char name[256];
snprintf(name, sizeof(name), "<luau> %s:%d %s", source, p->linedefined, p->debugname ? getstr(p->debugname) : "");
if (gPerfLogFn)
gPerfLogFn(gPerfLogContext, addr, size, name);
}
template<typename AssemblyBuilder, typename IrLowering>
static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, int bytecodeid, AssemblyOptions options)
{
@ -329,24 +346,17 @@ static void onDestroyFunction(lua_State* L, Proto* proto)
static int onEnter(lua_State* L, Proto* proto)
{
if (L->singlestep)
return 1;
NativeState* data = getNativeState(L);
if (!L->ci->savedpc)
L->ci->savedpc = proto->code;
// We will jump into native code through a gateway
bool (*gate)(lua_State*, Proto*, uintptr_t, NativeContext*) = (bool (*)(lua_State*, Proto*, uintptr_t, NativeContext*))data->context.gateEntry;
NativeProto* nativeProto = getProtoExecData(proto);
LUAU_ASSERT(nativeProto);
LUAU_ASSERT(L->ci->savedpc);
// instOffsets uses negative indexing for optimal codegen for RETURN opcode
uintptr_t target = nativeProto->instBase + nativeProto->instOffsets[-(L->ci->savedpc - proto->code)];
uintptr_t target = nativeProto->instBase + nativeProto->instOffsets[proto->code - L->ci->savedpc];
// Returns 1 to finish the function in the VM
return gate(L, proto, target, &data->context);
return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
}
static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
@ -375,9 +385,9 @@ static unsigned int getCpuFeaturesA64()
bool isSupported()
{
#if !LUA_CUSTOM_EXECUTION
return false;
#elif defined(__x86_64__) || defined(_M_X64)
if (!LUA_CUSTOM_EXECUTION)
return false;
if (LUA_EXTRA_SIZE != 1)
return false;
@ -387,6 +397,16 @@ bool isSupported()
if (sizeof(LuaNode) != 32)
return false;
// Windows CRT uses stack unwinding in longjmp so we have to use unwind data; on other platforms, it's only necessary for C++ EH.
#if defined(_WIN32)
if (!isUnwindSupported())
return false;
#else
if (!LUA_USE_LONGJMP && !isUnwindSupported())
return false;
#endif
#if defined(__x86_64__) || defined(_M_X64)
int cpuinfo[4] = {};
#ifdef _MSC_VER
__cpuid(cpuinfo, 1);
@ -402,21 +422,6 @@ bool isSupported()
return true;
#elif defined(__aarch64__)
if (LUA_EXTRA_SIZE != 1)
return false;
if (sizeof(TValue) != 16)
return false;
if (sizeof(LuaNode) != 32)
return false;
#ifdef _WIN32
// Unwind info is not supported for Windows-on-ARM yet
if (!LUA_USE_LONGJMP)
return false;
#endif
return true;
#else
return false;
@ -456,6 +461,9 @@ void create(lua_State* L)
}
#endif
if (gPerfLogFn)
gPerfLogFn(gPerfLogContext, uintptr_t(data.context.gateEntry), 4096, "<luau gate>");
lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
ecb->close = onCloseState;
@ -540,6 +548,20 @@ void compile(lua_State* L, int idx)
return;
}
if (gPerfLogFn && results.size() > 0)
{
gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), results[0]->instOffsets[0], "<luau helpers>");
for (size_t i = 0; i < results.size(); ++i)
{
uint32_t begin = results[i]->instOffsets[0];
uint32_t end = i + 1 < results.size() ? results[i + 1]->instOffsets[0] : uint32_t(build.code.size() * sizeof(build.code[0]));
LUAU_ASSERT(begin < end);
logPerfFunction(results[i]->proto, uintptr_t(codeStart) + begin, end - begin);
}
}
// Record instruction base address; at runtime, instOffsets[] will be used as offsets from instBase
for (NativeProto* result : results)
{
@ -591,5 +613,11 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
return build.text;
}
void setPerfLog(void* context, PerfLogFn logFn)
{
gPerfLogContext = context;
gPerfLogFn = logFn;
}
} // namespace CodeGen
} // namespace Luau


@ -267,7 +267,7 @@ void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build)
void emitExit(AssemblyBuilderX64& build, bool continueInVm)
{
if (continueInVm)
build.mov(al, 1);
build.mov(eax, 1);
else
build.xor_(eax, eax);


@ -1483,7 +1483,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
{
inst.regA64 = regs.allocReuse(KindA64::w, index, {inst.a});
RegisterA64 temp = tempUint(inst.a);
build.mvn(inst.regA64, temp);
build.mvn_(inst.regA64, temp);
break;
}
case IrCmd::BITLSHIFT_UINT:
@ -1660,8 +1660,28 @@ RegisterA64 IrLoweringA64::tempDouble(IrOp op)
{
RegisterA64 temp1 = regs.allocTemp(KindA64::x);
RegisterA64 temp2 = regs.allocTemp(KindA64::d);
build.adr(temp1, val);
build.ldr(temp2, temp1);
uint64_t vali;
static_assert(sizeof(vali) == sizeof(val), "Expecting double to be 64-bit");
memcpy(&vali, &val, sizeof(val));
if ((vali << 16) == 0)
{
build.movz(temp1, uint16_t(vali >> 48), 48);
build.fmov(temp2, temp1);
}
else if ((vali << 32) == 0)
{
build.movz(temp1, uint16_t(vali >> 48), 48);
build.movk(temp1, uint16_t(vali >> 32), 32);
build.fmov(temp2, temp1);
}
else
{
build.adr(temp1, val);
build.ldr(temp2, temp1);
}
return temp2;
}
}
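The two fast paths above key off how much of the double's bit pattern is non-zero. A standalone C++ check (the classify helper and the sample values are purely illustrative) shows which path a given constant would take:

#include <cstdint>
#include <cstdio>
#include <cstring>

static void classify(double value)
{
    uint64_t vali;
    memcpy(&vali, &value, sizeof(value));

    const char* path = "adr+ldr from the data section";
    if ((vali << 16) == 0)
        path = "single movz";
    else if ((vali << 32) == 0)
        path = "movz+movk pair";

    printf("%g -> %016llx -> %s\n", value, (unsigned long long)vali, path);
}

int main()
{
    classify(2.0);    // 0x4000000000000000: only the top 16 bits are set
    classify(100.25); // 0x4059100000000000: bits 32..47 are also set
    classify(1.1);    // 0x3ff199999999999a: low bits set, needs the memory load
    return 0;
}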


@ -65,21 +65,42 @@ void translateInstLoadN(IrBuilder& build, const Instruction* pc)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
static void translateInstLoadConstant(IrBuilder& build, int ra, int k)
{
TValue protok = build.function.proto->k[k];
// Compiler only generates LOADK for source-level constants, so dynamic imports are not affected
if (protok.tt == LUA_TNIL)
{
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNIL));
}
else if (protok.tt == LUA_TBOOLEAN)
{
build.inst(IrCmd::STORE_INT, build.vmReg(ra), build.constInt(protok.value.b));
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TBOOLEAN));
}
else if (protok.tt == LUA_TNUMBER)
{
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.constDouble(protok.value.n));
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
else
{
// Remaining tag here right now is LUA_TSTRING, while it can be transformed to LOAD_POINTER/STORE_POINTER/STORE_TAG, it's not profitable right
// now
IrOp load = build.inst(IrCmd::LOAD_TVALUE, build.vmConst(k));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), load);
}
}
void translateInstLoadK(IrBuilder& build, const Instruction* pc)
{
int ra = LUAU_INSN_A(*pc);
IrOp load = build.inst(IrCmd::LOAD_TVALUE, build.vmConst(LUAU_INSN_D(*pc)));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), load);
translateInstLoadConstant(build, LUAU_INSN_A(*pc), LUAU_INSN_D(*pc));
}
void translateInstLoadKX(IrBuilder& build, const Instruction* pc)
{
int ra = LUAU_INSN_A(*pc);
uint32_t aux = pc[1];
IrOp load = build.inst(IrCmd::LOAD_TVALUE, build.vmConst(aux));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), load);
translateInstLoadConstant(build, LUAU_INSN_A(*pc), pc[1]);
}
void translateInstMove(IrBuilder& build, const Instruction* pc)


@ -108,6 +108,8 @@ struct NativeContext
luau_FastFunction luauF_table[256] = {};
};
using GateFn = int (*)(lua_State*, Proto*, uintptr_t, NativeContext*);
struct NativeState
{
NativeState();


@ -14,6 +14,8 @@ inline bool isFlagExperimental(const char* flag)
"LuauInstantiateInSubtyping", // requires some fixes to lua-apps code
"LuauTypecheckTypeguards", // requires some fixes to lua-apps code (CLI-67030)
"LuauTinyControlFlowAnalysis", // waiting for updates to packages depended by internal builtin plugins
"LuauUnifyTwoOptions", // requires some fixes to lua-apps code
// makes sure we always have at least one entry
nullptr,
};


@ -180,6 +180,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/TypeAttach.h
Analysis/include/Luau/TypeChecker2.h
Analysis/include/Luau/TypedAllocator.h
Analysis/include/Luau/TypeFamily.h
Analysis/include/Luau/TypeInfer.h
Analysis/include/Luau/TypePack.h
Analysis/include/Luau/TypeReduction.h
@ -230,6 +231,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/TypeAttach.cpp
Analysis/src/TypeChecker2.cpp
Analysis/src/TypedAllocator.cpp
Analysis/src/TypeFamily.cpp
Analysis/src/TypeInfer.cpp
Analysis/src/TypePack.cpp
Analysis/src/TypeReduction.cpp
@ -382,6 +384,8 @@ if(TARGET Luau.UnitTest)
tests/TopoSort.test.cpp
tests/ToString.test.cpp
tests/Transpiler.test.cpp
tests/TxnLog.test.cpp
tests/TypeFamily.test.cpp
tests/TypeInfer.aliases.test.cpp
tests/TypeInfer.annotations.test.cpp
tests/TypeInfer.anyerror.test.cpp


@ -12,6 +12,8 @@
#include <string.h>
#include <stdio.h>
LUAU_FASTFLAGVARIABLE(LuauFixBreakpointLineSearch, false)
static const char* getfuncname(Closure* f);
static int currentpc(lua_State* L, CallInfo* ci)
@ -423,11 +425,23 @@ static int getnextline(Proto* p, int line)
if (LUAU_INSN_OP(p->code[i]) == LOP_PREPVARARGS)
continue;
int current = luaG_getline(p, i);
if (current >= line)
int candidate = luaG_getline(p, i);
if (FFlag::LuauFixBreakpointLineSearch)
{
closest = current;
break;
if (candidate == line)
return line;
if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}
else
{
if (candidate >= line)
{
closest = candidate;
break;
}
}
}
}
@ -436,9 +450,21 @@ static int getnextline(Proto* p, int line)
{
// Find the closest line number to the intended one.
int candidate = getnextline(p->p[i], line);
if (closest == -1 || (candidate >= line && candidate < closest))
if (FFlag::LuauFixBreakpointLineSearch)
{
closest = candidate;
if (candidate == line)
return line;
if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}
else
{
if (closest == -1 || (candidate >= line && candidate < closest))
{
closest = candidate;
}
}
}
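Since candidates are visited in instruction order rather than source order, the old rule of stopping at the first candidate >= line could settle on a line farther from the request than necessary; the new rule returns an exact hit immediately and otherwise keeps the smallest candidate above the requested line. A standalone C++ sketch of that selection (closestLine is an illustrative name, not the engine's):

#include <cassert>
#include <vector>

int closestLine(const std::vector<int>& candidates, int line)
{
    int closest = -1;
    for (int candidate : candidates)
    {
        if (candidate == line)
            return line;
        if (candidate > line && (closest == -1 || candidate < closest))
            closest = candidate;
    }
    return closest;
}

int main()
{
    // The old "first candidate >= line, then stop" rule would pick 12 here,
    // even though line 10 is a closer place to put the breakpoint.
    assert(closestLine({3, 12, 10, 7}, 9) == 10);
    assert(closestLine({3, 12, 10, 7}, 7) == 7);
    return 0;
}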


@ -210,7 +210,7 @@ static void luau_execute(lua_State* L)
#if LUA_CUSTOM_EXECUTION
Proto* p = clvalue(L->ci->func)->l.p;
if (p->execdata)
if (p->execdata && !SingleStep)
{
if (L->global->ecb.enter(L, p) == 0)
return;
@ -952,9 +952,9 @@ reentry:
L->top = p->is_vararg ? argi : ci->top;
#if LUA_CUSTOM_EXECUTION
if (p->execdata)
if (LUAU_UNLIKELY(p->execdata && !SingleStep))
{
LUAU_ASSERT(L->global->ecb.enter);
ci->savedpc = p->code;
if (L->global->ecb.enter(L, p) == 1)
goto reentry;
@ -1050,10 +1050,8 @@ reentry:
Proto* nextproto = nextcl->l.p;
#if LUA_CUSTOM_EXECUTION
if (nextproto->execdata)
if (LUAU_UNLIKELY(nextproto->execdata && !SingleStep))
{
LUAU_ASSERT(L->global->ecb.enter);
if (L->global->ecb.enter(L, nextproto) == 1)
goto reentry;
else


@ -0,0 +1,39 @@
local bench = script and require(script.Parent.bench_support) or require("bench_support")
local function mmul(matrix1, matrix2)
local shapeRows = #matrix1
local shapeColumns = #matrix2[1]
local result = table.create(shapeRows)
for i = 1, shapeRows do
result[i] = table.create(shapeColumns)
for j = 1, shapeColumns do
local sum = 0
for k = 1, shapeColumns do
sum = sum + matrix1[i][k] * matrix2[k][j]
end
result[i][j] = sum
end
end
return result
end
function test()
local n = 100
local mat = table.create(n)
for i = 1, n do
local t = table.create(n)
for k = 1, n do
t[k] = math.random()
end
mat[i] = t
end
local startTime = os.clock()
local result = mmul(mat, mat)
return os.clock() - startTime
end
bench.runCode(test, "matrixmult")


@ -0,0 +1,254 @@
--!strict
local bench = script and require(script.Parent.bench_support) or require("bench_support")
function test()
type Vertex = {
pX: number, pY: number, pZ: number,
uvX: number, uvY: number, uvZ: number,
nX: number, nY: number, nZ: number,
tX: number, tY: number, tZ: number,
bX: number, bY: number, bZ: number,
h: number
}
local grid_size = 100
local mesh: {
vertices: {Vertex},
indices: {number},
triangle_cone_p: {{x: number, y: number, z: number}},
triangle_cone_n: {{x: number, y: number, z: number}}
} = {
vertices = table.create(grid_size * grid_size),
indices = table.create((grid_size - 1) * (grid_size - 1) * 6),
triangle_cone_p = table.create((grid_size - 1) * (grid_size - 1) * 2),
triangle_cone_n = table.create((grid_size - 1) * (grid_size - 1) * 2)
}
local function init_vertices()
local i = 1
for y = 1,grid_size do
for x = 1,grid_size do
local v: Vertex = {}
v.pX = x
v.pY = y
v.pZ = math.cos(x) + math.sin(y)
v.uvX = (x-1)/(grid_size-1)
v.uvY = (y-1)/(grid_size-1)
v.uvZ = 0
v.nX = 0
v.nY = 0
v.nZ = 0
v.bX = 0
v.bY = 0
v.bZ = 0
v.tX = 0
v.tY = 0
v.tZ = 0
v.h = 0
mesh.vertices[i] = v
i += 1
end
end
end
local function init_indices()
local i = 1
for y = 1,grid_size-1 do
for x = 1,grid_size-1 do
mesh.indices[i] = x + (y-1)*grid_size
i += 1
mesh.indices[i] = x + y*grid_size
i += 1
mesh.indices[i] = (x+1) + (y-1)*grid_size
i += 1
mesh.indices[i] = (x+1) + (y-1)*grid_size
i += 1
mesh.indices[i] = x + y*grid_size
i += 1
mesh.indices[i] = (x+1) + y*grid_size
i += 1
end
end
end
local function calculate_normals()
local norm_sum = 0
for i = 1,#mesh.indices,3 do
local a = mesh.vertices[mesh.indices[i]]
local b = mesh.vertices[mesh.indices[i + 1]]
local c = mesh.vertices[mesh.indices[i + 2]]
local abx = a.pX - b.pX
local aby = a.pY - b.pY
local abz = a.pZ - b.pZ
local acx = a.pX - c.pX
local acy = a.pY - c.pY
local acz = a.pZ - c.pZ
local nx = aby * acz - abz * acy;
local ny = abz * acx - abx * acz;
local nz = abx * acy - aby * acx;
a.nX += nx
a.nY += ny
a.nZ += nz
b.nX += nx
b.nY += ny
b.nZ += nz
c.nX += nx
c.nY += ny
c.nZ += nz
end
for _,v in mesh.vertices do
local magnitude = math.sqrt(v.nX * v.nX + v.nY * v.nY + v.nZ * v.nZ)
v.nX /= magnitude
v.nY /= magnitude
v.nZ /= magnitude
norm_sum += v.nX * v.nX + v.nY * v.nY + v.nZ * v.nZ
end
return norm_sum
end
local function compute_triangle_cones()
local mesh_area = 0
local pos = 1
for i = 1,#mesh.indices,3 do
local p0 = mesh.vertices[mesh.indices[i]]
local p1 = mesh.vertices[mesh.indices[i + 1]]
local p2 = mesh.vertices[mesh.indices[i + 2]]
local p10x = p1.pX - p0.pX
local p10y = p1.pY - p0.pY
local p10z = p1.pZ - p0.pZ
local p20x = p2.pX - p0.pX
local p20y = p2.pY - p0.pY
local p20z = p2.pZ - p0.pZ
local normalx = p10y * p20z - p10z * p20y;
local normaly = p10z * p20x - p10x * p20z;
local normalz = p10x * p20y - p10y * p20x;
local area = math.sqrt(normalx * normalx + normaly * normaly + normalz * normalz)
local invarea = if area == 0 then 0 else 1 / area;
local rx = (p0.pX + p1.pX + p2.pX) / 3
local ry = (p0.pY + p1.pY + p2.pY) / 3
local rz = (p0.pZ + p1.pZ + p2.pZ) / 3
mesh.triangle_cone_p[pos] = { x = rx, y = ry, z = rz }
mesh.triangle_cone_n[pos] = { x = normalx * invarea, y = normaly * invarea, z = normalz * invarea}
pos += 1
mesh_area += area
end
return mesh_area
end
local function compute_tangent_space()
local checksum = 0
for i = 1,#mesh.indices,3 do
local a = mesh.vertices[mesh.indices[i]]
local b = mesh.vertices[mesh.indices[i + 1]]
local c = mesh.vertices[mesh.indices[i + 2]]
local x1 = b.pX - a.pX
local x2 = c.pX - a.pX
local y1 = b.pY - a.pY
local y2 = c.pY - a.pY
local z1 = b.pZ - a.pZ
local z2 = c.pZ - a.pZ
local s1 = b.uvX - a.uvX
local s2 = c.uvX - a.uvX
local t1 = b.uvY - a.uvY
local t2 = c.uvY - a.uvY
local r = 1.0 / (s1 * t2 - s2 * t1);
local sdirX = (t2 * x1 - t1 * x2) * r
local sdirY = (t2 * y1 - t1 * y2) * r
local sdirZ = (t2 * z1 - t1 * z2) * r
local tdirX = (s1 * x2 - s2 * x1) * r
local tdirY = (s1 * y2 - s2 * y1) * r
local tdirZ = (s1 * z2 - s2 * z1) * r
a.tX += sdirX
a.tY += sdirY
a.tZ += sdirZ
b.tX += sdirX
b.tY += sdirY
b.tZ += sdirZ
c.tX += sdirX
c.tY += sdirY
c.tZ += sdirZ
a.bX += tdirX
a.bY += tdirY
a.bZ += tdirZ
b.bX += tdirX
b.bY += tdirY
b.bZ += tdirZ
c.bX += tdirX
c.bY += tdirY
c.bZ += tdirZ
end
for _,v in mesh.vertices do
local tX = v.tX
local tY = v.tY
local tZ = v.tZ
-- Gram-Schmidt orthogonalize
local ndt = v.nX * tX + v.nY * tY + v.nZ * tZ
local tmnsX = tX - v.nX * ndt
local tmnsY = tY - v.nY * ndt
local tmnsZ = tZ - v.nZ * ndt
local l = math.sqrt(tmnsX * tmnsX + tmnsY * tmnsY + tmnsZ * tmnsZ)
local invl = 1 / l
v.tX = tmnsX * invl
v.tY = tmnsY * invl
v.tZ = tmnsZ * invl
local normalx = v.nY * tZ - v.nZ * tY;
local normaly = v.nZ * tX - v.nX * tZ;
local normalz = v.nX * tY - v.nY * tX;
local ht = normalx * v.bX + normaly * v.bY + normalz * v.bZ
v.h = ht < 0 and -1 or 1
checksum += v.tX + v.h
end
return checksum
end
init_vertices()
init_indices()
calculate_normals()
compute_triangle_cones()
compute_tangent_space()
end
bench.runCode(test, "mesh-normal-scalar")


@ -70,7 +70,7 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Unary")
{
SINGLE_COMPARE(neg(x0, x1), 0xCB0103E0);
SINGLE_COMPARE(neg(w0, w1), 0x4B0103E0);
SINGLE_COMPARE(mvn(x0, x1), 0xAA2103E0);
SINGLE_COMPARE(mvn_(x0, x1), 0xAA2103E0);
SINGLE_COMPARE(clz(x0, x1), 0xDAC01020);
SINGLE_COMPARE(clz(w0, w1), 0x5AC01020);
@ -338,6 +338,7 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "AddressOfLabel")
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPBasic")
{
SINGLE_COMPARE(fmov(d0, d1), 0x1E604020);
SINGLE_COMPARE(fmov(d0, x1), 0x9E670020);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPMath")


@ -684,15 +684,30 @@ TEST_CASE("ConstantStorage")
build.finalize();
LUAU_ASSERT(build.data.size() == 12004);
CHECK(build.data.size() == 12004);
for (int i = 0; i <= 3000; i++)
{
LUAU_ASSERT(build.data[i * 4 + 0] == 0x00);
LUAU_ASSERT(build.data[i * 4 + 1] == 0x00);
LUAU_ASSERT(build.data[i * 4 + 2] == 0x80);
LUAU_ASSERT(build.data[i * 4 + 3] == 0x3f);
CHECK(build.data[i * 4 + 0] == 0x00);
CHECK(build.data[i * 4 + 1] == 0x00);
CHECK(build.data[i * 4 + 2] == 0x80);
CHECK(build.data[i * 4 + 3] == 0x3f);
}
}
TEST_CASE("ConstantCaching")
{
AssemblyBuilderX64 build(/* logText= */ false);
OperandX64 two = build.f64(2);
// Force data relocation
for (int i = 0; i < 4096; i++)
build.f64(i);
CHECK(build.f64(2).imm == two.imm);
build.finalize();
}
TEST_SUITE_END();


@ -105,6 +105,19 @@ ClassFixture::ClassFixture()
};
globals.globalScope->exportedTypeBindings["CallableClass"] = TypeFun{{}, callableClassType};
auto addIndexableClass = [&arena, &globals](const char* className, TypeId keyType, TypeId returnType) {
ScopedFastFlag LuauTypecheckClassTypeIndexers("LuauTypecheckClassTypeIndexers", true);
TypeId indexableClassMetaType = arena.addType(TableType{});
TypeId indexableClassType =
arena.addType(ClassType{className, {}, nullopt, indexableClassMetaType, {}, {}, "Test", TableIndexer{keyType, returnType}});
globals.globalScope->exportedTypeBindings[className] = TypeFun{{}, indexableClassType};
};
// IndexableClass has a table indexer with a key type of 'number | string' and a return type of 'number'
addIndexableClass("IndexableClass", arena.addType(Luau::UnionType{{builtinTypes->stringType, numberType}}), numberType);
// IndexableNumericKeyClass has a table indexer with a key type of 'number' and a return type of 'number'
addIndexableClass("IndexableNumericKeyClass", numberType, numberType);
for (const auto& [name, tf] : globals.globalScope->exportedTypeBindings)
persist(tf.type);


@ -646,6 +646,10 @@ static void throwing(int64_t arg)
TEST_CASE("GeneratedCodeExecutionWithThrowA64")
{
// macOS 12 doesn't support JIT frames without pointer authentication
if (!isUnwindSupported())
return;
using namespace A64;
AssemblyBuilderA64 build(/* logText= */ false);


@ -561,6 +561,8 @@ TEST_CASE("Debug")
TEST_CASE("Debugger")
{
ScopedFastFlag luauFixBreakpointLineSearch{"LuauFixBreakpointLineSearch", true};
static int breakhits = 0;
static lua_State* interruptedthread = nullptr;
static bool singlestep = false;
@ -703,6 +705,15 @@ TEST_CASE("Debugger")
CHECK(lua_tointeger(L, -1) == 9);
lua_pop(L, 1);
}
else if (breakhits == 13)
{
// validate assignment via lua_getlocal
const char* l = lua_getlocal(L, 0, 1);
REQUIRE(l);
CHECK(strcmp(l, "a") == 0);
CHECK(lua_isnil(L, -1));
lua_pop(L, 1);
}
if (interruptedthread)
{
@ -712,7 +723,7 @@ TEST_CASE("Debugger")
},
nullptr, &copts, /* skipCodegen */ true); // Native code doesn't support debugging yet
CHECK(breakhits == 12); // 2 hits per breakpoint
CHECK(breakhits == 14); // 2 hits per breakpoint
if (singlestep)
CHECK(stephits > 100); // note; this will depend on number of instructions which can vary, so we just make sure the callback gets hit often

tests/TxnLog.test.cpp (new file, 113 lines)

@ -0,0 +1,113 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "doctest.h"
#include "Luau/Scope.h"
#include "Luau/ToString.h"
#include "Luau/TxnLog.h"
#include "Luau/Type.h"
#include "Luau/TypeArena.h"
#include "ScopedFlags.h"
using namespace Luau;
struct TxnLogFixture
{
TxnLog log{/*useScopes*/ true};
TxnLog log2{/*useScopes*/ true};
TypeArena arena;
BuiltinTypes builtinTypes;
ScopePtr globalScope = std::make_shared<Scope>(builtinTypes.anyTypePack);
ScopePtr childScope = std::make_shared<Scope>(globalScope);
TypeId a = arena.freshType(globalScope.get());
TypeId b = arena.freshType(globalScope.get());
TypeId c = arena.freshType(childScope.get());
};
TEST_SUITE_BEGIN("TxnLog");
TEST_CASE_FIXTURE(TxnLogFixture, "colliding_union_incoming_type_has_greater_scope")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
log.replace(c, BoundType{a});
log2.replace(a, BoundType{c});
CHECK(nullptr != log.pending(c));
log.concatAsUnion(std::move(log2), NotNull{&arena});
// 'a has greater scope than 'c, so we expect the incoming binding of 'a to
// be discarded.
CHECK(nullptr == log.pending(a));
const PendingType* pt = log.pending(c);
REQUIRE(pt != nullptr);
CHECK(!pt->dead);
const BoundType* bt = get_if<BoundType>(&pt->pending.ty);
CHECK(a == bt->boundTo);
log.commit();
REQUIRE(get<FreeType>(a));
const BoundType* bound = get<BoundType>(c);
REQUIRE(bound);
CHECK(a == bound->boundTo);
}
TEST_CASE_FIXTURE(TxnLogFixture, "colliding_union_incoming_type_has_lesser_scope")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
log.replace(a, BoundType{c});
log2.replace(c, BoundType{a});
CHECK(nullptr != log.pending(a));
log.concatAsUnion(std::move(log2), NotNull{&arena});
// 'a has greater scope than 'c, so we expect the binding of 'a to be
// discarded, and for that of 'c to be brought in.
CHECK(nullptr == log.pending(a));
const PendingType* pt = log.pending(c);
REQUIRE(pt != nullptr);
CHECK(!pt->dead);
const BoundType* bt = get_if<BoundType>(&pt->pending.ty);
REQUIRE(bt);
CHECK(a == bt->boundTo);
log.commit();
REQUIRE(get<FreeType>(a));
const BoundType* bound = get<BoundType>(c);
REQUIRE(bound);
CHECK(a == bound->boundTo);
}
TEST_CASE_FIXTURE(TxnLogFixture, "colliding_coincident_logs_do_not_create_degenerate_unions")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
log.replace(a, BoundType{b});
log2.replace(a, BoundType{b});
log.concatAsUnion(std::move(log2), NotNull{&arena});
log.commit();
CHECK("a" == toString(a));
CHECK("a" == toString(b));
}
TEST_SUITE_END();

tests/TypeFamily.test.cpp (new file, 205 lines)

@ -0,0 +1,205 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/TypeFamily.h"
#include "Luau/Type.h"
#include "Fixture.h"
#include "doctest.h"
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
struct FamilyFixture : Fixture
{
TypeFamily swapFamily;
FamilyFixture()
: Fixture(true, false)
{
swapFamily = TypeFamily{/* name */ "Swap",
/* reducer */
[](std::vector<TypeId> tys, std::vector<TypePackId> tps, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins,
NotNull<const TxnLog> log) -> TypeFamilyReductionResult<TypeId> {
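// The result fields below are, in order (inferred from how they are constructed in this
// reducer): the reduced type if any, whether the instance is uninhabited, and the types and
// type packs that reduction is currently blocked on.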
LUAU_ASSERT(tys.size() == 1);
TypeId param = log->follow(tys.at(0));
if (isString(param))
{
return TypeFamilyReductionResult<TypeId>{builtins->numberType, false, {}, {}};
}
else if (isNumber(param))
{
return TypeFamilyReductionResult<TypeId>{builtins->stringType, false, {}, {}};
}
else if (log->get<BlockedType>(param) || log->get<FreeType>(param) || log->get<PendingExpansionType>(param) ||
log->get<TypeFamilyInstanceType>(param))
{
return TypeFamilyReductionResult<TypeId>{std::nullopt, false, {param}, {}};
}
else
{
return TypeFamilyReductionResult<TypeId>{std::nullopt, true, {}, {}};
}
}};
unfreeze(frontend.globals.globalTypes);
TypeId t = frontend.globals.globalTypes.addType(GenericType{"T"});
GenericTypeDefinition genericT{t};
ScopePtr globalScope = frontend.globals.globalScope;
globalScope->exportedTypeBindings["Swap"] =
TypeFun{{genericT}, frontend.globals.globalTypes.addType(TypeFamilyInstanceType{NotNull{&swapFamily}, {t}, {}})};
freeze(frontend.globals.globalTypes);
}
};
TEST_SUITE_BEGIN("TypeFamilyTests");
TEST_CASE_FIXTURE(FamilyFixture, "basic_type_family")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
type A = Swap<number>
type B = Swap<string>
type C = Swap<boolean>
local x = 123
local y: Swap<typeof(x)> = "foo"
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK("string" == toString(requireTypeAlias("A")));
CHECK("number" == toString(requireTypeAlias("B")));
CHECK("Swap<boolean>" == toString(requireTypeAlias("C")));
CHECK("string" == toString(requireType("y")));
CHECK("Type family instance Swap<boolean> is uninhabited" == toString(result.errors[0]));
};
TEST_CASE_FIXTURE(FamilyFixture, "type_reduction_reduces_families")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local x: Swap<string> & nil
)");
CHECK("never" == toString(requireType("x")));
}
TEST_CASE_FIXTURE(FamilyFixture, "family_as_fn_ret")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local swapper: <T>(T) -> Swap<T>
local a = swapper(123)
local b = swapper("foo")
local c = swapper(false)
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK("string" == toString(requireType("a")));
CHECK("number" == toString(requireType("b")));
CHECK("Swap<boolean>" == toString(requireType("c")));
CHECK("Type family instance Swap<boolean> is uninhabited" == toString(result.errors[0]));
}
TEST_CASE_FIXTURE(FamilyFixture, "family_as_fn_arg")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local swapper: <T>(Swap<T>) -> T
local a = swapper(123)
local b = swapper(false)
)");
LUAU_REQUIRE_ERROR_COUNT(2, result);
// FIXME: Can we constrain these to `never` or `unknown`?
CHECK("a" == toString(requireType("a")));
CHECK("a" == toString(requireType("b")));
CHECK("Type family instance Swap<a> is uninhabited" == toString(result.errors[0]));
CHECK("Type family instance Swap<a> is uninhabited" == toString(result.errors[1]));
}
TEST_CASE_FIXTURE(FamilyFixture, "resolve_deep_families")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local x: Swap<Swap<Swap<string>>>
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("number" == toString(requireType("x")));
}
TEST_CASE_FIXTURE(FamilyFixture, "unsolvable_family")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local impossible: <T>(Swap<T>) -> Swap<Swap<T>>
local a = impossible(123)
local b = impossible(true)
)");
LUAU_REQUIRE_ERROR_COUNT(4, result);
for (size_t i = 0; i < 4; ++i)
{
CHECK(toString(result.errors[i]) == "Type family instance Swap<a> is uninhabited");
}
}
TEST_CASE_FIXTURE(FamilyFixture, "table_internal_families")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local t: <T>({T}) -> {Swap<T>}
local a = t({1, 2, 3})
local b = t({"a", "b", "c"})
local c = t({true, false, true})
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(toString(requireType("a")) == "{string}");
CHECK(toString(requireType("b")) == "{number}");
CHECK(toString(requireType("c")) == "{Swap<boolean>}");
CHECK(toString(result.errors[0]) == "Type family instance Swap<boolean> is uninhabited");
}
TEST_CASE_FIXTURE(FamilyFixture, "function_internal_families")
{
// This test is broken right now, but it's not because of type families. See
// CLI-71143.
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
local f0: <T>(T) -> (() -> T)
local f: <T>(T) -> (() -> Swap<T>)
local a = f(1)
local b = f("a")
local c = f(true)
local d = f0(1)
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(toString(requireType("a")) == "() -> string");
CHECK(toString(requireType("b")) == "() -> number");
CHECK(toString(requireType("c")) == "() -> Swap<boolean>");
CHECK(toString(result.errors[0]) == "Type family instance Swap<boolean> is uninhabited");
}
TEST_SUITE_END();


@ -481,4 +481,150 @@ TEST_CASE_FIXTURE(ClassFixture, "callable_classes")
CHECK_EQ("number", toString(requireType("y")));
}
TEST_CASE_FIXTURE(ClassFixture, "indexable_classes")
{
// Test reading from an index
ScopedFastFlag LuauTypecheckClassTypeIndexers("LuauTypecheckClassTypeIndexers", true);
{
CheckResult result = check(R"(
local x : IndexableClass
local y = x.stringKey
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
{
CheckResult result = check(R"(
local x : IndexableClass
local y = x["stringKey"]
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
{
CheckResult result = check(R"(
local x : IndexableClass
local str : string
local y = x[str] -- Index with a non-const string
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
{
CheckResult result = check(R"(
local x : IndexableClass
local y = x[7] -- Index with a numeric key
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
// Test writing to an index
{
CheckResult result = check(R"(
local x : IndexableClass
x.stringKey = 42
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
{
CheckResult result = check(R"(
local x : IndexableClass
x["stringKey"] = 42
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
{
CheckResult result = check(R"(
local x : IndexableClass
local str : string
x[str] = 42 -- Index with a non-const string
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
{
CheckResult result = check(R"(
local x : IndexableClass
x[1] = 42 -- Index with a numeric key
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
// Try to index the class using an invalid type for the key (the key type is 'number | string').
{
CheckResult result = check(R"(
local x : IndexableClass
local y = x[true]
)");
CHECK_EQ(
toString(result.errors[0]), "Type 'boolean' could not be converted into 'number | string'; none of the union options are compatible");
}
{
CheckResult result = check(R"(
local x : IndexableClass
x[true] = 42
)");
CHECK_EQ(
toString(result.errors[0]), "Type 'boolean' could not be converted into 'number | string'; none of the union options are compatible");
}
// Test type checking for the return type of the indexer (i.e. a number)
{
CheckResult result = check(R"(
local x : IndexableClass
x.key = "string value"
)");
CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
}
{
CheckResult result = check(R"(
local x : IndexableClass
local str : string = x.key
)");
CHECK_EQ(toString(result.errors[0]), "Type 'number' could not be converted into 'string'");
}
// Check that string keys are rejected if the indexer's key type is not compatible with string
{
CheckResult result = check(R"(
local x : IndexableNumericKeyClass
x.key = 1
)");
CHECK_EQ(toString(result.errors.at(0)), "Key 'key' not found in class 'IndexableNumericKeyClass'");
}
{
CheckResult result = check(R"(
local x : IndexableNumericKeyClass
x["key"] = 1
)");
CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
}
{
CheckResult result = check(R"(
local x : IndexableNumericKeyClass
local str : string
x[str] = 1 -- Index with a non-const string
)");
CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
}
{
CheckResult result = check(R"(
local x : IndexableNumericKeyClass
local y = x.key
)");
CHECK_EQ(toString(result.errors[0]), "Key 'key' not found in class 'IndexableNumericKeyClass'");
}
{
CheckResult result = check(R"(
local x : IndexableNumericKeyClass
local y = x["key"]
)");
CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
}
{
CheckResult result = check(R"(
local x : IndexableNumericKeyClass
local str : string
local y = x[str] -- Index with a non-const string
)");
CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
}
}
TEST_SUITE_END();


@ -1952,4 +1952,40 @@ TEST_CASE_FIXTURE(Fixture, "instantiated_type_packs_must_have_a_non_null_scope")
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "inner_frees_become_generic_in_dcr")
{
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
function f(x)
local z = x
return x
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
std::optional<TypeId> ty = findTypeAtPosition(Position{3, 19});
REQUIRE(ty);
CHECK(get<GenericType>(*ty));
}
TEST_CASE_FIXTURE(Fixture, "function_exprs_are_generalized_at_signature_scope_not_enclosing")
{
CheckResult result = check(R"(
local foo
local bar
-- foo being a function expression is deliberate: the bug we're testing
-- only existed for function expressions, not for function statements.
foo = function(a)
return bar
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
// note that b is not in the generic list; it is free, the unconstrained type of `bar`.
CHECK(toString(requireType("foo")) == "<a>(a) -> b");
}
TEST_SUITE_END();


@ -550,6 +550,8 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables")
TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
{
ScopedFastFlag sff{"LuauUnifyTwoOptions", true};
CheckResult result = check(R"(
local x : { p : number?, q : any } & { p : unknown, q : string? }
local y : { p : number?, q : string? } = x -- OK
@ -563,27 +565,19 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
CHECK_EQ(toString(result.errors[0]),
"Type '{| p: number?, q: string? |}' could not be converted into '{| p: string?, q: number? |}'\n"
"caused by:\n"
" Property 'p' is not compatible. Type 'number?' could not be converted into 'string?'\n"
"caused by:\n"
" Not all union options are compatible. Type 'number' could not be converted into 'string?'\n"
"caused by:\n"
" None of the union options are compatible. For example: Type 'number' could not be converted into 'string' in an invariant context");
" Property 'p' is not compatible. Type 'number' could not be converted into 'string' in an invariant context");
CHECK_EQ(toString(result.errors[1]),
"Type '{| p: number?, q: string? |}' could not be converted into '{| p: string?, q: number? |}'\n"
"caused by:\n"
" Property 'q' is not compatible. Type 'string?' could not be converted into 'number?'\n"
"caused by:\n"
" Not all union options are compatible. Type 'string' could not be converted into 'number?'\n"
"caused by:\n"
" None of the union options are compatible. For example: Type 'string' could not be converted into 'number' in an invariant context");
" Property 'q' is not compatible. Type 'string' could not be converted into 'number' in an invariant context");
}
else
{
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_EQ(toString(result.errors[0]),
"Type '{| p: number?, q: any |} & {| p: unknown, q: string? |}' could not be converted into '{| p: string?, "
"q: number? |}'; none of the intersection parts are compatible");
"Type '{| p: number?, q: any |} & {| p: unknown, q: string? |}' could not be converted into "
"'{| p: string?, q: number? |}'; none of the intersection parts are compatible");
}
}


@ -1134,7 +1134,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "luau_polyfill_is_array_simplified")
return false
end
return true
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
@ -1179,4 +1179,30 @@ end
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "luau-polyfill.Array.startswith")
{
// This test also exercises whether the binary operator == passes the correct expected type
// to its left and right operands
CheckResult result = check(R"(
--!strict
local function startsWith(value: string, substring: string, position: number?): boolean
-- Luau FIXME: we have to use a tmp variable, as Luau doesn't understand that the logic below narrows position to `number`
local position_
if position == nil or position < 1 then
position_ = 1
else
position_ = position
end
return value:find(substring, position_, true) == position_
end
return startsWith
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END();


@ -482,41 +482,6 @@ TEST_CASE_FIXTURE(Fixture, "dcr_can_partially_dispatch_a_constraint")
CHECK("<a>(a, number) -> ()" == toString(requireType("prime_iter")));
}
TEST_CASE_FIXTURE(Fixture, "free_options_cannot_be_unified_together")
{
ScopedFastFlag sff[] = {
{"LuauTransitiveSubtyping", true},
};
TypeArena arena;
TypeId nilType = builtinTypes->nilType;
std::unique_ptr scope = std::make_unique<Scope>(builtinTypes->anyTypePack);
TypeId free1 = arena.addType(FreeType{scope.get()});
TypeId option1 = arena.addType(UnionType{{nilType, free1}});
TypeId free2 = arena.addType(FreeType{scope.get()});
TypeId option2 = arena.addType(UnionType{{nilType, free2}});
InternalErrorReporter iceHandler;
UnifierSharedState sharedState{&iceHandler};
Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, NotNull{scope.get()}, Location{}, Variance::Covariant};
u.tryUnify(option1, option2);
CHECK(!u.failure);
u.log.commit();
ToStringOptions opts;
CHECK("a?" == toString(option1, opts));
// CHECK("a?" == toString(option2, opts)); // This should hold, but does not.
CHECK("b?" == toString(option2, opts)); // This should not hold.
}
TEST_CASE_FIXTURE(BuiltinsFixture, "for_in_loop_with_zero_iterators")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", false};


@ -390,6 +390,8 @@ TEST_CASE_FIXTURE(Fixture, "widen_the_supertype_if_it_is_free_and_subtype_has_si
TEST_CASE_FIXTURE(Fixture, "return_type_of_f_is_not_widened")
{
ScopedFastFlag sff{"LuauUnifyTwoOptions", true};
CheckResult result = check(R"(
local function foo(f, x): "hello"? -- anyone there?
return if x == "hi"
@ -401,7 +403,9 @@ TEST_CASE_FIXTURE(Fixture, "return_type_of_f_is_not_widened")
LUAU_REQUIRE_NO_ERRORS(result);
CHECK_EQ(R"("hi")", toString(requireTypeAtPosition({3, 23})));
CHECK_EQ(R"(<a, b, c...>((string) -> (a, c...), b) -> "hello"?)", toString(requireType("foo")));
CHECK_EQ(R"(<a, b...>((string) -> ("hello", b...), a) -> "hello"?)", toString(requireType("foo")));
// This is more accurate but we're not there yet:
// CHECK_EQ(R"(<a, b...>((string) -> ("hello"?, b...), a) -> "hello"?)", toString(requireType("foo")));
}


@ -1233,4 +1233,69 @@ TEST_CASE_FIXTURE(Fixture, "dcr_delays_expansion_of_function_containing_blocked_
)");
}
TEST_CASE_FIXTURE(BuiltinsFixture, "recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter")
{
CheckResult result = check(R"(
local TRUE: true = true
local function matches(value, t: true)
if value then
return true
end
end
local function readValue(breakpoint)
if matches(breakpoint, TRUE) then
readValue(breakpoint)
end
end
)");
CHECK("<a>(a) -> ()" == toString(requireType("readValue")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter_2")
{
CheckResult result = check(R"(
local function readValue(breakpoint)
if type(breakpoint) == 'number' then
readValue(breakpoint)
end
end
)");
CHECK("(number) -> ()" == toString(requireType("readValue")));
}
/*
* We got into a case where, as we unified two nearly identical unions with one
* another, a concatenated TxnLog created a cycle between two free types.
*
* This code used to crash the type checker. See CLI-71190
*/
TEST_CASE_FIXTURE(BuiltinsFixture, "convoluted_case_where_two_TypeVars_were_bound_to_each_other")
{
check(R"(
type React_Ref<ElementType> = { current: ElementType } | ((ElementType) -> ())
type React_AbstractComponent<Config, Instance> = {
render: ((ref: React_Ref<Instance>) -> nil)
}
local createElement : <P, T>(React_AbstractComponent<P, T>) -> ()
function ScrollView:render()
local one = table.unpack(
if true then a else b
)
createElement(one)
createElement(one)
end
)");
// If this code does not crash, we are in good shape.
}
TEST_SUITE_END();


@ -409,4 +409,91 @@ local l0:(any)&(typeof(_)),l0:(any)|(any) = _,_
LUAU_REQUIRE_ERRORS(result);
}
static TypeId createTheType(TypeArena& arena, NotNull<BuiltinTypes> builtinTypes, Scope* scope, TypeId freeTy)
{
/*
({|
render: (
(('a) -> ()) | {| current: 'a |}
) -> nil
|}) -> ()
*/
TypePackId emptyPack = arena.addTypePack({});
return arena.addType(FunctionType{
arena.addTypePack({arena.addType(TableType{
TableType::Props{{{"render",
Property(arena.addType(FunctionType{
arena.addTypePack({arena.addType(UnionType{{arena.addType(FunctionType{arena.addTypePack({freeTy}), emptyPack}),
arena.addType(TableType{TableType::Props{{"current", {freeTy}}}, std::nullopt, TypeLevel{}, scope, TableState::Sealed})}})}),
arena.addTypePack({builtinTypes->nilType})}))}}},
std::nullopt, TypeLevel{}, scope, TableState::Sealed})}),
emptyPack});
};
// See CLI-71190
TEST_CASE_FIXTURE(TryUnifyFixture, "unifying_two_unions_under_dcr_does_not_create_a_BoundType_cycle")
{
const std::shared_ptr<Scope> scope = globalScope;
const std::shared_ptr<Scope> nestedScope = std::make_shared<Scope>(scope);
const TypeId outerType = arena.freshType(scope.get());
const TypeId outerType2 = arena.freshType(scope.get());
const TypeId innerType = arena.freshType(nestedScope.get());
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
state.enableScopeTests();
SUBCASE("equal_scopes")
{
TypeId one = createTheType(arena, builtinTypes, scope.get(), outerType);
TypeId two = createTheType(arena, builtinTypes, scope.get(), outerType2);
state.tryUnify(one, two);
state.log.commit();
ToStringOptions opts;
CHECK(follow(outerType) == follow(outerType2));
}
SUBCASE("outer_scope_is_subtype")
{
TypeId one = createTheType(arena, builtinTypes, scope.get(), outerType);
TypeId two = createTheType(arena, builtinTypes, scope.get(), innerType);
state.tryUnify(one, two);
state.log.commit();
ToStringOptions opts;
CHECK(follow(outerType) == follow(innerType));
// The scope of outerType exceeds that of innerType. The latter should be bound to the former.
const BoundType* bt = get_if<BoundType>(&innerType->ty);
REQUIRE(bt);
CHECK(bt->boundTo == outerType);
}
SUBCASE("outer_scope_is_supertype")
{
TypeId one = createTheType(arena, builtinTypes, scope.get(), innerType);
TypeId two = createTheType(arena, builtinTypes, scope.get(), outerType);
state.tryUnify(one, two);
state.log.commit();
ToStringOptions opts;
CHECK(follow(outerType) == follow(innerType));
// The scope of outerType exceeds that of innerType. The latter should be bound to the former.
const BoundType* bt = get_if<BoundType>(&innerType->ty);
REQUIRE(bt);
CHECK(bt->boundTo == outerType);
}
}
TEST_SUITE_END();


@ -792,4 +792,82 @@ TEST_CASE_FIXTURE(Fixture, "lookup_prop_of_intersection_containing_unions")
CHECK("variables" == unknownProp->key);
}
TEST_CASE_FIXTURE(Fixture, "free_options_can_be_unified_together")
{
ScopedFastFlag sff[] = {
{"LuauTransitiveSubtyping", true},
{"LuauUnifyTwoOptions", true}
};
TypeArena arena;
TypeId nilType = builtinTypes->nilType;
std::unique_ptr scope = std::make_unique<Scope>(builtinTypes->anyTypePack);
TypeId free1 = arena.addType(FreeType{scope.get()});
TypeId option1 = arena.addType(UnionType{{nilType, free1}});
TypeId free2 = arena.addType(FreeType{scope.get()});
TypeId option2 = arena.addType(UnionType{{nilType, free2}});
InternalErrorReporter iceHandler;
UnifierSharedState sharedState{&iceHandler};
Normalizer normalizer{&arena, builtinTypes, NotNull{&sharedState}};
Unifier u{NotNull{&normalizer}, Mode::Strict, NotNull{scope.get()}, Location{}, Variance::Covariant};
u.tryUnify(option1, option2);
CHECK(!u.failure);
u.log.commit();
ToStringOptions opts;
CHECK("a?" == toString(option1, opts));
CHECK("a?" == toString(option2, opts));
}
TEST_CASE_FIXTURE(Fixture, "unify_more_complex_unions_that_include_nil")
{
CheckResult result = check(R"(
type Record = {prop: (string | boolean)?}
function concatPagination(prop: (string | boolean | nil)?): Record
return {prop = prop}
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "optional_class_instances_are_invariant")
{
ScopedFastFlag sff[] = {
{"LuauUnifyTwoOptions", true},
{"LuauTypeMismatchInvarianceInError", true}
};
createSomeClasses(&frontend);
CheckResult result = check(R"(
function foo(ref: {current: Parent?})
end
function bar(ref: {current: Child?})
foo(ref)
end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
// The last line of this error is the most important part. We need to
// communicate that this is an invariant context.
std::string expectedError =
"Type '{| current: Child? |}' could not be converted into '{| current: Parent? |}'\n"
"caused by:\n"
" Property 'current' is not compatible. Type 'Child' could not be converted into 'Parent' in an invariant context"
;
CHECK(expectedError == toString(result.errors[0]));
}
TEST_SUITE_END();


@ -69,4 +69,17 @@ end
breakpointSetFromMetamethod()
-- break inside function with non-monotonic line info
local function cond(a)
if a then
print('a')
else
print('not a')
end
end
breakpoint(77)
pcall(cond, nil) -- prevent inlining
return 'OK'


@ -1,5 +1,6 @@
AnnotationTests.too_many_type_params
AstQuery.last_argument_function_call_type
AutocompleteTest.autocomplete_response_perf1
BuiltinTests.aliased_string_format
BuiltinTests.assert_removes_falsy_types
BuiltinTests.assert_removes_falsy_types2
@ -34,7 +35,6 @@ GenericsTests.generic_functions_should_be_memory_safe
GenericsTests.generic_type_pack_parentheses
GenericsTests.higher_rank_polymorphism_should_not_accept_instantiated_arguments
GenericsTests.infer_generic_function_function_argument_2
GenericsTests.infer_generic_function_function_argument_3
GenericsTests.infer_generic_function_function_argument_overloaded
GenericsTests.infer_generic_lib_function_function_argument
GenericsTests.instantiated_function_argument_names
@ -47,7 +47,6 @@ ProvisionalTests.assign_table_with_refined_property_with_a_similar_type_is_illeg
ProvisionalTests.bail_early_if_unification_is_too_complicated
ProvisionalTests.do_not_ice_when_trying_to_pick_first_of_generic_type_pack
ProvisionalTests.error_on_eq_metamethod_returning_a_type_other_than_boolean
ProvisionalTests.free_options_cannot_be_unified_together
ProvisionalTests.generic_type_leak_to_module_interface_variadic
ProvisionalTests.greedy_inference_with_shared_self_triggers_function_with_no_returns
ProvisionalTests.luau-polyfill.Array.filter
@ -60,7 +59,6 @@ RefinementTest.type_narrow_to_vector
RefinementTest.typeguard_cast_free_table_to_vector
RefinementTest.typeguard_in_assert_position
RefinementTest.x_as_any_if_x_is_instance_elseif_x_is_table
RuntimeLimits.typescript_port_of_Result_type
TableTests.a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible
TableTests.checked_prop_too_early
TableTests.disallow_indexing_into_an_unsealed_table_with_no_indexer_in_strict_mode
@ -124,6 +122,7 @@ TypeAliases.type_alias_local_mutation
TypeAliases.type_alias_local_rename
TypeAliases.type_alias_locations
TypeAliases.type_alias_of_an_imported_recursive_generic_type
TypeFamilyTests.function_internal_families
TypeInfer.check_type_infer_recursion_count
TypeInfer.cli_50041_committing_txnlog_in_apollo_client_error
TypeInfer.dont_report_type_errors_within_an_AstExprError
@ -133,6 +132,7 @@ TypeInfer.fuzz_free_table_type_change_during_index_check
TypeInfer.infer_assignment_value_types_mutable_lval
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.no_stack_overflow_from_isoptional2
TypeInfer.recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter_2
TypeInfer.tc_after_error_recovery_no_replacement_name_in_error
TypeInfer.type_infer_recursion_limit_no_ice
TypeInfer.type_infer_recursion_limit_normalizer
@ -165,9 +165,7 @@ TypeInferFunctions.too_many_return_values_no_function
TypeInferLoops.for_in_loop_error_on_factory_not_returning_the_right_amount_of_values
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_in_with_generic_next
TypeInferLoops.loop_iter_metamethod_ok_with_inference
TypeInferLoops.loop_iter_trailing_nil
TypeInferLoops.properly_infer_iteratee_is_a_free_table
TypeInferLoops.unreachable_code_after_infinite_loop
TypeInferModules.do_not_modify_imported_types_5
TypeInferModules.module_type_conflict
@ -177,7 +175,6 @@ TypeInferOOP.methods_are_topologically_sorted
TypeInferOperators.CallAndOrOfFunctions
TypeInferOperators.CallOrOfFunctions
TypeInferOperators.cli_38355_recursive_union
TypeInferOperators.compound_assign_metatable
TypeInferOperators.compound_assign_mismatch_metatable
TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops
TypeInferOperators.operator_eq_completely_incompatible