Clone the Luau sources from a configurable git URL instead of vendoring them in the repo

Michael Pfaff 2023-07-16 14:46:51 -04:00
parent 63f535e684
commit 46f714691b
Signed by: michael
GPG Key ID: CF402C4A012AA9D4
162 changed files with 35 additions and 63457 deletions

File diff suppressed because it is too large

Luau/Confusables.h

@@ -1,9 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stdint.h>
namespace Luau
{
const char* findConfusable(uint32_t codepoint);
}
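A minimal usage sketch of this header (illustrative only, not part of the commit; the specific U+037E mapping is an assumption about the generated confusables table):

#include "Luau/Confusables.h"
#include <cstdio>

int main()
{
    uint32_t codepoint = 0x037E; // GREEK QUESTION MARK, a ';' lookalike (assumed to be in the table)
    if (const char* ascii = Luau::findConfusable(codepoint))
        printf("U+%04X is confusable with \"%s\"\n", (unsigned)codepoint, ascii);
    else
        printf("U+%04X has no known ASCII confusable\n", (unsigned)codepoint);
    return 0;
}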

Luau/Lexer.h

@@ -1,264 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Ast.h"
#include "Luau/Location.h"
#include "Luau/DenseHash.h"
#include "Luau/Common.h"
#include <vector>
namespace Luau
{
class Allocator
{
public:
Allocator();
Allocator(Allocator&&);
Allocator& operator=(Allocator&&) = delete;
~Allocator();
void* allocate(size_t size);
template<typename T, typename... Args>
T* alloc(Args&&... args)
{
static_assert(std::is_trivially_destructible<T>::value, "Objects allocated with this allocator will never have their destructors run!");
T* t = static_cast<T*>(allocate(sizeof(T)));
new (t) T(std::forward<Args>(args)...);
return t;
}
private:
struct Page
{
Page* next;
char data[8192];
};
Page* root;
size_t offset;
};
struct Lexeme
{
enum Type
{
Eof = 0,
// 1..255 means actual character values
Char_END = 256,
Equal,
LessEqual,
GreaterEqual,
NotEqual,
Dot2,
Dot3,
SkinnyArrow,
DoubleColon,
InterpStringBegin,
InterpStringMid,
InterpStringEnd,
// An interpolated string with no expressions (like `x`)
InterpStringSimple,
AddAssign,
SubAssign,
MulAssign,
DivAssign,
ModAssign,
PowAssign,
ConcatAssign,
RawString,
QuotedString,
Number,
Name,
Comment,
BlockComment,
BrokenString,
BrokenComment,
BrokenUnicode,
BrokenInterpDoubleBrace,
Error,
Reserved_BEGIN,
ReservedAnd = Reserved_BEGIN,
ReservedBreak,
ReservedDo,
ReservedElse,
ReservedElseif,
ReservedEnd,
ReservedFalse,
ReservedFor,
ReservedFunction,
ReservedIf,
ReservedIn,
ReservedLocal,
ReservedNil,
ReservedNot,
ReservedOr,
ReservedRepeat,
ReservedReturn,
ReservedThen,
ReservedTrue,
ReservedUntil,
ReservedWhile,
Reserved_END
};
Type type;
Location location;
unsigned int length;
union
{
const char* data; // String, Number, Comment
const char* name; // Name
unsigned int codepoint; // BrokenUnicode
};
Lexeme(const Location& location, Type type);
Lexeme(const Location& location, char character);
Lexeme(const Location& location, Type type, const char* data, size_t size);
Lexeme(const Location& location, Type type, const char* name);
std::string toString() const;
};
class AstNameTable
{
public:
AstNameTable(Allocator& allocator);
AstName addStatic(const char* name, Lexeme::Type type = Lexeme::Name);
std::pair<AstName, Lexeme::Type> getOrAddWithType(const char* name, size_t length);
std::pair<AstName, Lexeme::Type> getWithType(const char* name, size_t length) const;
AstName getOrAdd(const char* name);
AstName get(const char* name) const;
private:
struct Entry
{
AstName value;
uint32_t length;
Lexeme::Type type;
bool operator==(const Entry& other) const;
};
struct EntryHash
{
size_t operator()(const Entry& e) const;
};
DenseHashSet<Entry, EntryHash> data;
Allocator& allocator;
};
class Lexer
{
public:
Lexer(const char* buffer, std::size_t bufferSize, AstNameTable& names);
void setSkipComments(bool skip);
void setReadNames(bool read);
const Location& previousLocation() const
{
return prevLocation;
}
const Lexeme& next();
const Lexeme& next(bool skipComments, bool updatePrevLocation);
void nextline();
Lexeme lookahead();
const Lexeme& current() const
{
return lexeme;
}
static bool isReserved(const std::string& word);
static bool fixupQuotedString(std::string& data);
static void fixupMultilineString(std::string& data);
private:
char peekch() const;
char peekch(unsigned int lookahead) const;
Position position() const;
void consume();
Lexeme readCommentBody();
// Given a sequence [===[ or ]===], returns:
// 1. number of equal signs (or 0 if none present) between the brackets
// 2. -1 if this is not a long comment/string separator
// 3. -N if this is a malformed separator
// Does *not* consume the closing brace.
int skipLongSeparator();
Lexeme readLongString(const Position& start, int sep, Lexeme::Type ok, Lexeme::Type broken);
Lexeme readQuotedString();
Lexeme readInterpolatedStringBegin();
Lexeme readInterpolatedStringSection(Position start, Lexeme::Type formatType, Lexeme::Type endType);
void readBackslashInString();
std::pair<AstName, Lexeme::Type> readName();
Lexeme readNumber(const Position& start, unsigned int startOffset);
Lexeme readUtf8Error();
Lexeme readNext();
const char* buffer;
std::size_t bufferSize;
unsigned int offset;
unsigned int line;
unsigned int lineOffset;
Lexeme lexeme;
Location prevLocation;
AstNameTable& names;
bool skipComments;
bool readNames;
enum class BraceType
{
InterpolatedString,
Normal
};
std::vector<BraceType> braceStack;
};
inline bool isSpace(char ch)
{
return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' || ch == '\v' || ch == '\f';
}
} // namespace Luau
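A minimal sketch of driving this lexer standalone (illustrative, not part of the commit; comment handling is toggled with setSkipComments):

#include "Luau/Lexer.h"
#include <cstdio>
#include <cstring>

int main()
{
    const char* source = "local x = 42 -- the answer";
    Luau::Allocator allocator;           // bump allocator backing the name table
    Luau::AstNameTable names(allocator); // interns identifiers and keywords
    Luau::Lexer lexer(source, strlen(source), names);
    lexer.setSkipComments(true);         // don't report comment lexemes
    for (Luau::Lexeme lexeme = lexer.next(); lexeme.type != Luau::Lexeme::Eof; lexeme = lexer.next())
        printf("%u:%u\t%s\n", lexeme.location.begin.line + 1, lexeme.location.begin.column + 1, lexeme.toString().c_str());
    return 0;
}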

Luau/Location.h

@@ -1,43 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
struct Position
{
unsigned int line, column;
Position(unsigned int line, unsigned int column);
bool operator==(const Position& rhs) const;
bool operator!=(const Position& rhs) const;
bool operator<(const Position& rhs) const;
bool operator>(const Position& rhs) const;
bool operator<=(const Position& rhs) const;
bool operator>=(const Position& rhs) const;
void shift(const Position& start, const Position& oldEnd, const Position& newEnd);
};
struct Location
{
Position begin, end;
Location();
Location(const Position& begin, const Position& end);
Location(const Position& begin, unsigned int length);
Location(const Location& begin, const Location& end);
bool operator==(const Location& rhs) const;
bool operator!=(const Location& rhs) const;
bool encloses(const Location& l) const;
bool overlaps(const Location& l) const;
bool contains(const Position& p) const;
bool containsClosed(const Position& p) const;
void extend(const Location& other);
void shift(const Position& start, const Position& oldEnd, const Position& newEnd);
};
} // namespace Luau
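These declarations define half-open intervals over zero-based positions, which the implementation later in this diff makes concrete; a small sketch:

#include "Luau/Location.h"
#include <cassert>

int main()
{
    Luau::Location loc(Luau::Position(0, 4), Luau::Position(0, 9)); // columns [4, 9) on line 0
    assert(loc.contains(Luau::Position(0, 4)));       // begin is inclusive
    assert(!loc.contains(Luau::Position(0, 9)));      // end is exclusive
    assert(loc.containsClosed(Luau::Position(0, 9))); // unless the closed variant is used
    assert(loc.encloses(Luau::Location(Luau::Position(0, 5), Luau::Position(0, 7))));
    return 0;
}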

Luau/ParseOptions.h

@@ -1,21 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
enum class Mode
{
NoCheck, // Do not perform any inference
Nonstrict, // Unannotated symbols are any
Strict, // Unannotated symbols are inferred
Definition, // Type definition module, has special parsing rules
};
struct ParseOptions
{
bool allowDeclarationSyntax = false;
bool captureComments = false;
};
} // namespace Luau

Luau/ParseResult.h

@@ -1,71 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include "Luau/Location.h"
#include "Luau/Lexer.h"
#include "Luau/StringUtils.h"
namespace Luau
{
class AstStatBlock;
class ParseError : public std::exception
{
public:
ParseError(const Location& location, const std::string& message);
virtual const char* what() const throw();
const Location& getLocation() const;
const std::string& getMessage() const;
static LUAU_NORETURN void raise(const Location& location, const char* format, ...) LUAU_PRINTF_ATTR(2, 3);
private:
Location location;
std::string message;
};
class ParseErrors : public std::exception
{
public:
ParseErrors(std::vector<ParseError> errors);
virtual const char* what() const throw();
const std::vector<ParseError>& getErrors() const;
private:
std::vector<ParseError> errors;
std::string message;
};
struct HotComment
{
bool header;
Location location;
std::string content;
};
struct Comment
{
Lexeme::Type type; // Comment, BlockComment, or BrokenComment
Location location;
};
struct ParseResult
{
AstStatBlock* root;
size_t lines = 0;
std::vector<HotComment> hotcomments;
std::vector<ParseError> errors;
std::vector<Comment> commentLocations;
};
static constexpr const char* kParseNameError = "%error-id%";
} // namespace Luau
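A sketch of draining the diagnostics carried by a ParseResult (a hypothetical helper, not part of the commit; assumes the result was produced with captureComments enabled):

#include "Luau/ParseResult.h"
#include <cstdio>

// Hypothetical helper: print hot comments (--! directives) and parse errors.
void reportDiagnostics(const Luau::ParseResult& result)
{
    for (const Luau::HotComment& hc : result.hotcomments)
        printf("%shot comment at line %u: %s\n", hc.header ? "header " : "", hc.location.begin.line + 1, hc.content.c_str());
    for (const Luau::ParseError& error : result.errors)
        printf("error (%u,%u): %s\n", error.getLocation().begin.line + 1, error.getLocation().begin.column + 1, error.getMessage().c_str());
}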

Luau/Parser.h

@@ -1,415 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Ast.h"
#include "Luau/Lexer.h"
#include "Luau/ParseOptions.h"
#include "Luau/ParseResult.h"
#include "Luau/StringUtils.h"
#include "Luau/DenseHash.h"
#include "Luau/Common.h"
#include <initializer_list>
#include <optional>
#include <tuple>
namespace Luau
{
template<typename T>
class TempVector
{
public:
explicit TempVector(std::vector<T>& storage);
~TempVector();
const T& operator[](std::size_t index) const;
const T& front() const;
const T& back() const;
bool empty() const;
std::size_t size() const;
void push_back(const T& item);
typename std::vector<T>::const_iterator begin() const
{
return storage.begin() + offset;
}
typename std::vector<T>::const_iterator end() const
{
return storage.begin() + offset + size_;
}
private:
std::vector<T>& storage;
size_t offset;
size_t size_;
};
class Parser
{
public:
static ParseResult parse(
const char* buffer, std::size_t bufferSize, AstNameTable& names, Allocator& allocator, ParseOptions options = ParseOptions());
private:
struct Name;
struct Binding;
Parser(const char* buffer, std::size_t bufferSize, AstNameTable& names, Allocator& allocator, const ParseOptions& options);
bool blockFollow(const Lexeme& l);
AstStatBlock* parseChunk();
// chunk ::= {stat [`;']} [laststat [`;']]
// block ::= chunk
AstStatBlock* parseBlock();
AstStatBlock* parseBlockNoScope();
// stat ::=
// varlist `=' explist |
// functioncall |
// do block end |
// while exp do block end |
// repeat block until exp |
// if exp then block {elseif exp then block} [else block] end |
// for Name `=' exp `,' exp [`,' exp] do block end |
// for namelist in explist do block end |
// function funcname funcbody |
// local function Name funcbody |
// local namelist [`=' explist]
// laststat ::= return [explist] | break
AstStat* parseStat();
// if exp then block {elseif exp then block} [else block] end
AstStat* parseIf();
// while exp do block end
AstStat* parseWhile();
// repeat block until exp
AstStat* parseRepeat();
// do block end
AstStat* parseDo();
// break
AstStat* parseBreak();
// continue
AstStat* parseContinue(const Location& start);
// for Name `=' exp `,' exp [`,' exp] do block end |
// for namelist in explist do block end |
AstStat* parseFor();
// funcname ::= Name {`.' Name} [`:' Name]
AstExpr* parseFunctionName(Location start, bool& hasself, AstName& debugname);
// function funcname funcbody
AstStat* parseFunctionStat();
// local function Name funcbody |
// local namelist [`=' explist]
AstStat* parseLocal();
// return [explist]
AstStat* parseReturn();
// type Name `=' Type
AstStat* parseTypeAlias(const Location& start, bool exported);
AstDeclaredClassProp parseDeclaredClassMethod();
// `declare global' Name: Type |
// `declare function' Name`(' [parlist] `)' [`:` Type]
AstStat* parseDeclaration(const Location& start);
// varlist `=' explist
AstStat* parseAssignment(AstExpr* initial);
// var [`+=' | `-=' | `*=' | `/=' | `%=' | `^=' | `..='] exp
AstStat* parseCompoundAssignment(AstExpr* initial, AstExprBinary::Op op);
std::pair<AstLocal*, AstArray<AstLocal*>> prepareFunctionArguments(const Location& start, bool hasself, const TempVector<Binding>& args);
// funcbodyhead ::= `(' [namelist [`,' `...'] | `...'] `)' [`:` Type]
// funcbody ::= funcbodyhead block end
std::pair<AstExprFunction*, AstLocal*> parseFunctionBody(
bool hasself, const Lexeme& matchFunction, const AstName& debugname, const Name* localName);
// explist ::= {exp `,'} exp
void parseExprList(TempVector<AstExpr*>& result);
// binding ::= Name [`:` Type]
Binding parseBinding();
// bindinglist ::= (binding | `...') {`,' bindinglist}
// Returns whether the list ends in a vararg `...`, the location of that vararg if present, and its type annotation (or nullptr).
std::tuple<bool, Location, AstTypePack*> parseBindingList(TempVector<Binding>& result, bool allowDot3 = false);
AstType* parseOptionalType();
// TypeList ::= Type [`,' TypeList]
// ReturnType ::= Type | `(' TypeList `)'
// TableProp ::= Name `:' Type
// TableIndexer ::= `[' Type `]' `:' Type
// PropList ::= (TableProp | TableIndexer) [`,' PropList]
// Type
// ::= Name
// | `nil`
// | `{' [PropList] `}'
// | `(' [TypeList] `)' `->` ReturnType
// Returns the variadic annotation, if it exists.
AstTypePack* parseTypeList(TempVector<AstType*>& result, TempVector<std::optional<AstArgumentName>>& resultNames);
std::optional<AstTypeList> parseOptionalReturnType();
std::pair<Location, AstTypeList> parseReturnType();
AstTableIndexer* parseTableIndexer();
AstTypeOrPack parseFunctionType(bool allowPack);
AstType* parseFunctionTypeTail(const Lexeme& begin, AstArray<AstGenericType> generics, AstArray<AstGenericTypePack> genericPacks,
AstArray<AstType*> params, AstArray<std::optional<AstArgumentName>> paramNames, AstTypePack* varargAnnotation);
AstType* parseTableType();
AstTypeOrPack parseSimpleType(bool allowPack);
AstTypeOrPack parseTypeOrPack();
AstType* parseType();
AstTypePack* parseTypePack();
AstTypePack* parseVariadicArgumentTypePack();
AstType* parseTypeSuffix(AstType* type, const Location& begin);
static std::optional<AstExprUnary::Op> parseUnaryOp(const Lexeme& l);
static std::optional<AstExprBinary::Op> parseBinaryOp(const Lexeme& l);
static std::optional<AstExprBinary::Op> parseCompoundOp(const Lexeme& l);
struct BinaryOpPriority
{
unsigned char left, right;
};
std::optional<AstExprUnary::Op> checkUnaryConfusables();
std::optional<AstExprBinary::Op> checkBinaryConfusables(const BinaryOpPriority binaryPriority[], unsigned int limit);
// subexpr -> (asexp | unop subexpr) { binop subexpr }
// where `binop' is any binary operator with a priority higher than `limit'
AstExpr* parseExpr(unsigned int limit = 0);
// NAME
AstExpr* parseNameExpr(const char* context = nullptr);
// prefixexp -> NAME | '(' expr ')'
AstExpr* parsePrefixExpr();
// primaryexp -> prefixexp { `.' NAME | `[' exp `]' | `:' NAME funcargs | funcargs }
AstExpr* parsePrimaryExpr(bool asStatement);
// asexp -> simpleexp [`::' Type]
AstExpr* parseAssertionExpr();
// simpleexp -> NUMBER | STRING | NIL | true | false | ... | constructor | FUNCTION body | primaryexp
AstExpr* parseSimpleExpr();
// args ::= `(' [explist] `)' | tableconstructor | String
AstExpr* parseFunctionArgs(AstExpr* func, bool self);
// tableconstructor ::= `{' [fieldlist] `}'
// fieldlist ::= field {fieldsep field} [fieldsep]
// field ::= `[' exp `]' `=' exp | Name `=' exp | exp
// fieldsep ::= `,' | `;'
AstExpr* parseTableConstructor();
// TODO: Add grammar rules here?
AstExpr* parseIfElseExpr();
// stringinterp ::= <INTERP_BEGIN> exp {<INTERP_MID> exp} <INTERP_END>
AstExpr* parseInterpString();
// Name
std::optional<Name> parseNameOpt(const char* context = nullptr);
Name parseName(const char* context = nullptr);
Name parseIndexName(const char* context, const Position& previous);
// `<' namelist `>'
std::pair<AstArray<AstGenericType>, AstArray<AstGenericTypePack>> parseGenericTypeList(bool withDefaultValues);
// `<' Type[, ...] `>'
AstArray<AstTypeOrPack> parseTypeParams();
std::optional<AstArray<char>> parseCharArray();
AstExpr* parseString();
AstExpr* parseNumber();
AstLocal* pushLocal(const Binding& binding);
unsigned int saveLocals();
void restoreLocals(unsigned int offset);
// checks that the parser is at the given lexeme/symbol and consumes it on success; on failure, reports an error and continues
bool expectAndConsume(char value, const char* context = nullptr);
bool expectAndConsume(Lexeme::Type type, const char* context = nullptr);
void expectAndConsumeFail(Lexeme::Type type, const char* context);
struct MatchLexeme
{
MatchLexeme(const Lexeme& l)
: type(l.type)
, position(l.location.begin)
{
}
Lexeme::Type type;
Position position;
};
bool expectMatchAndConsume(char value, const MatchLexeme& begin, bool searchForMissing = false);
void expectMatchAndConsumeFail(Lexeme::Type type, const MatchLexeme& begin, const char* extra = nullptr);
bool expectMatchAndConsumeRecover(char value, const MatchLexeme& begin, bool searchForMissing);
bool expectMatchEndAndConsume(Lexeme::Type type, const MatchLexeme& begin);
void expectMatchEndAndConsumeFail(Lexeme::Type type, const MatchLexeme& begin);
template<typename T>
AstArray<T> copy(const T* data, std::size_t size);
template<typename T>
AstArray<T> copy(const TempVector<T>& data);
template<typename T>
AstArray<T> copy(std::initializer_list<T> data);
AstArray<char> copy(const std::string& data);
void incrementRecursionCounter(const char* context);
void report(const Location& location, const char* format, va_list args);
void report(const Location& location, const char* format, ...) LUAU_PRINTF_ATTR(3, 4);
void reportNameError(const char* context);
AstStatError* reportStatError(const Location& location, const AstArray<AstExpr*>& expressions, const AstArray<AstStat*>& statements,
const char* format, ...) LUAU_PRINTF_ATTR(5, 6);
AstExprError* reportExprError(const Location& location, const AstArray<AstExpr*>& expressions, const char* format, ...) LUAU_PRINTF_ATTR(4, 5);
AstTypeError* reportTypeError(const Location& location, const AstArray<AstType*>& types, const char* format, ...) LUAU_PRINTF_ATTR(4, 5);
// `parseErrorLocation` is associated with the parser error
// `astErrorLocation` is associated with the AstTypeError created
// It can be useful to have different error locations so that the parse error can include the next lexeme, while the AstTypeError can precisely
// define the location (possibly of zero size) where a type annotation is expected.
AstTypeError* reportMissingTypeError(const Location& parseErrorLocation, const Location& astErrorLocation, const char* format, ...)
LUAU_PRINTF_ATTR(4, 5);
AstExpr* reportFunctionArgsError(AstExpr* func, bool self);
void reportAmbiguousCallError();
void nextLexeme();
struct Function
{
bool vararg;
unsigned int loopDepth;
Function()
: vararg(false)
, loopDepth(0)
{
}
};
struct Local
{
AstLocal* local;
unsigned int offset;
Local()
: local(nullptr)
, offset(0)
{
}
};
struct Name
{
AstName name;
Location location;
Name(const AstName& name, const Location& location)
: name(name)
, location(location)
{
}
};
struct Binding
{
Name name;
AstType* annotation;
explicit Binding(const Name& name, AstType* annotation = nullptr)
: name(name)
, annotation(annotation)
{
}
};
ParseOptions options;
Lexer lexer;
Allocator& allocator;
std::vector<Comment> commentLocations;
std::vector<HotComment> hotcomments;
bool hotcommentHeader = true;
unsigned int recursionCounter;
AstName nameSelf;
AstName nameNumber;
AstName nameError;
AstName nameNil;
MatchLexeme endMismatchSuspect;
std::vector<Function> functionStack;
DenseHashMap<AstName, AstLocal*> localMap;
std::vector<AstLocal*> localStack;
std::vector<ParseError> parseErrors;
std::vector<unsigned int> matchRecoveryStopOnToken;
std::vector<AstStat*> scratchStat;
std::vector<AstArray<char>> scratchString;
std::vector<AstExpr*> scratchExpr;
std::vector<AstExpr*> scratchExprAux;
std::vector<AstName> scratchName;
std::vector<AstName> scratchPackName;
std::vector<Binding> scratchBinding;
std::vector<AstLocal*> scratchLocal;
std::vector<AstTableProp> scratchTableTypeProps;
std::vector<AstType*> scratchType;
std::vector<AstTypeOrPack> scratchTypeOrPack;
std::vector<AstDeclaredClassProp> scratchDeclaredClassProps;
std::vector<AstExprTable::Item> scratchItem;
std::vector<AstArgumentName> scratchArgName;
std::vector<AstGenericType> scratchGenericTypes;
std::vector<AstGenericTypePack> scratchGenericTypePacks;
std::vector<std::optional<AstArgumentName>> scratchOptArgName;
std::string scratchData;
};
} // namespace Luau
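Taken together, the public entry point above is driven like this minimal sketch (illustrative only; the source string and diagnostics formatting are assumptions):

#include "Luau/Parser.h"
#include <cstdio>
#include <cstring>

int main()
{
    const char* source = "local answer = 6 * 7\nprint(answer)";
    Luau::Allocator allocator;
    Luau::AstNameTable names(allocator);
    Luau::ParseOptions options;
    options.captureComments = true; // also collect comments into the result
    Luau::ParseResult result = Luau::Parser::parse(source, strlen(source), names, allocator, options);
    for (const Luau::ParseError& error : result.errors)
        printf("(%u,%u): %s\n", error.getLocation().begin.line + 1, error.getLocation().begin.column + 1, error.getMessage().c_str());
    if (result.root)
        printf("parsed %zu statements across %zu lines\n", size_t(result.root->body.size), result.lines);
    return 0;
}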

Luau/StringUtils.h

@@ -1,36 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <vector>
#include <string>
#include <stdarg.h>
namespace Luau
{
std::string format(const char* fmt, ...) LUAU_PRINTF_ATTR(1, 2);
std::string vformat(const char* fmt, va_list args);
void formatAppend(std::string& str, const char* fmt, ...) LUAU_PRINTF_ATTR(2, 3);
void vformatAppend(std::string& ret, const char* fmt, va_list args);
std::string join(const std::vector<std::string_view>& segments, std::string_view delimiter);
std::string join(const std::vector<std::string>& segments, std::string_view delimiter);
std::vector<std::string_view> split(std::string_view s, char delimiter);
// Computes the Damerau-Levenshtein distance of A and B.
// https://en.wikipedia.org/wiki/Damerau-Levenshtein_distance#Distance_with_adjacent_transpositions
size_t editDistance(std::string_view a, std::string_view b);
bool startsWith(std::string_view lhs, std::string_view rhs);
bool equalsLower(std::string_view lhs, std::string_view rhs);
size_t hashRange(const char* data, size_t size);
std::string escape(std::string_view s, bool escapeForInterpString = false);
bool isIdentifier(std::string_view s);
} // namespace Luau
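A few of these helpers exercised in a self-contained sketch (the expected values follow directly from the declarations above and the Damerau-Levenshtein definition cited in the comment):

#include "Luau/StringUtils.h"
#include <cassert>

int main()
{
    assert(Luau::format("%s, %s!", "Hello", "world") == "Hello, world!");
    auto parts = Luau::split("a,b,c", ',');
    assert(parts.size() == 3 && parts[1] == "b");
    assert(Luau::join(parts, "-") == "a-b-c");
    assert(Luau::editDistance("form", "from") == 1); // one adjacent transposition
    return 0;
}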

Luau/TimeTrace.h

@@ -1,230 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <vector>
#include <stdint.h>
LUAU_FASTFLAG(DebugLuauTimeTracing)
namespace Luau
{
namespace TimeTrace
{
double getClock();
uint32_t getClockMicroseconds();
} // namespace TimeTrace
} // namespace Luau
#if defined(LUAU_ENABLE_TIME_TRACE)
namespace Luau
{
namespace TimeTrace
{
struct Token
{
const char* name;
const char* category;
};
enum class EventType : uint8_t
{
Enter,
Leave,
ArgName,
ArgValue,
};
struct Event
{
EventType type;
uint16_t token;
union
{
uint32_t microsec; // 1 hour trace limit
uint32_t dataPos;
} data;
};
struct GlobalContext;
struct ThreadContext;
GlobalContext& getGlobalContext();
uint16_t createToken(GlobalContext& context, const char* name, const char* category);
uint32_t createThread(GlobalContext& context, ThreadContext* threadContext);
void releaseThread(GlobalContext& context, ThreadContext* threadContext);
void flushEvents(GlobalContext& context, uint32_t threadId, const std::vector<Event>& events, const std::vector<char>& data);
struct ThreadContext
{
ThreadContext()
: globalContext(getGlobalContext())
{
threadId = createThread(globalContext, this);
}
~ThreadContext()
{
if (!events.empty())
flushEvents();
releaseThread(globalContext, this);
}
void flushEvents()
{
static uint16_t flushToken = createToken(globalContext, "flushEvents", "TimeTrace");
events.push_back({EventType::Enter, flushToken, {getClockMicroseconds()}});
TimeTrace::flushEvents(globalContext, threadId, events, data);
events.clear();
data.clear();
events.push_back({EventType::Leave, 0, {getClockMicroseconds()}});
}
void eventEnter(uint16_t token)
{
eventEnter(token, getClockMicroseconds());
}
void eventEnter(uint16_t token, uint32_t microsec)
{
events.push_back({EventType::Enter, token, {microsec}});
}
void eventLeave()
{
eventLeave(getClockMicroseconds());
}
void eventLeave(uint32_t microsec)
{
events.push_back({EventType::Leave, 0, {microsec}});
if (events.size() > kEventFlushLimit)
flushEvents();
}
void eventArgument(const char* name, const char* value)
{
uint32_t pos = uint32_t(data.size());
data.insert(data.end(), name, name + strlen(name) + 1);
events.push_back({EventType::ArgName, 0, {pos}});
pos = uint32_t(data.size());
data.insert(data.end(), value, value + strlen(value) + 1);
events.push_back({EventType::ArgValue, 0, {pos}});
}
GlobalContext& globalContext;
uint32_t threadId;
std::vector<Event> events;
std::vector<char> data;
static constexpr size_t kEventFlushLimit = 8192;
};
ThreadContext& getThreadContext();
struct Scope
{
explicit Scope(uint16_t token)
: context(getThreadContext())
{
if (!FFlag::DebugLuauTimeTracing)
return;
context.eventEnter(token);
}
~Scope()
{
if (!FFlag::DebugLuauTimeTracing)
return;
context.eventLeave();
}
ThreadContext& context;
};
struct OptionalTailScope
{
explicit OptionalTailScope(uint16_t token, uint32_t threshold)
: context(getThreadContext())
, token(token)
, threshold(threshold)
{
if (!FFlag::DebugLuauTimeTracing)
return;
pos = uint32_t(context.events.size());
microsec = getClockMicroseconds();
}
~OptionalTailScope()
{
if (!FFlag::DebugLuauTimeTracing)
return;
if (pos == context.events.size())
{
uint32_t curr = getClockMicroseconds();
if (curr - microsec > threshold)
{
context.eventEnter(token, microsec);
context.eventLeave(curr);
}
}
}
ThreadContext& context;
uint16_t token;
uint32_t threshold;
uint32_t microsec;
uint32_t pos;
};
LUAU_NOINLINE uint16_t createScopeData(const char* name, const char* category);
} // namespace TimeTrace
} // namespace Luau
// Regular scope
#define LUAU_TIMETRACE_SCOPE(name, category) \
static uint16_t lttScopeStatic = Luau::TimeTrace::createScopeData(name, category); \
Luau::TimeTrace::Scope lttScope(lttScopeStatic)
// A scope without nested scopes that may be skipped if the time it took is less than the threshold
#define LUAU_TIMETRACE_OPTIONAL_TAIL_SCOPE(name, category, microsec) \
static uint16_t lttScopeStaticOptTail = Luau::TimeTrace::createScopeData(name, category); \
Luau::TimeTrace::OptionalTailScope lttScope(lttScopeStaticOptTail, microsec)
// Extra key/value data can be added to regular scopes
#define LUAU_TIMETRACE_ARGUMENT(name, value) \
do \
{ \
if (FFlag::DebugLuauTimeTracing) \
lttScope.context.eventArgument(name, value); \
} while (false)
#else
#define LUAU_TIMETRACE_SCOPE(name, category)
#define LUAU_TIMETRACE_OPTIONAL_TAIL_SCOPE(name, category, microsec)
#define LUAU_TIMETRACE_ARGUMENT(name, value) \
do \
{ \
} while (false)
#endif
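In a build with LUAU_ENABLE_TIME_TRACE defined (and FFlag::DebugLuauTimeTracing enabled at runtime), the macros above are used like the sketch below; the scope and argument names are illustrative. Events end up in trace.json in Chrome's trace-event format, as the implementation later in this diff shows.

#include "Luau/TimeTrace.h"

// Illustrative function: times its own body and attaches a key/value argument.
static void compileModule(const char* moduleName)
{
    LUAU_TIMETRACE_SCOPE("compileModule", "Compiler");
    LUAU_TIMETRACE_ARGUMENT("module", moduleName);
    // ... actual work would go here ...
}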

Ast.cpp

@@ -1,972 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Ast.h"
#include "Luau/Common.h"
namespace Luau
{
static void visitTypeList(AstVisitor* visitor, const AstTypeList& list)
{
for (AstType* ty : list.types)
ty->visit(visitor);
if (list.tailType)
list.tailType->visit(visitor);
}
int gAstRttiIndex = 0;
AstExprGroup::AstExprGroup(const Location& location, AstExpr* expr)
: AstExpr(ClassIndex(), location)
, expr(expr)
{
}
void AstExprGroup::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
expr->visit(visitor);
}
AstExprConstantNil::AstExprConstantNil(const Location& location)
: AstExpr(ClassIndex(), location)
{
}
void AstExprConstantNil::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstExprConstantBool::AstExprConstantBool(const Location& location, bool value)
: AstExpr(ClassIndex(), location)
, value(value)
{
}
void AstExprConstantBool::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstExprConstantNumber::AstExprConstantNumber(const Location& location, double value, ConstantNumberParseResult parseResult)
: AstExpr(ClassIndex(), location)
, value(value)
, parseResult(parseResult)
{
}
void AstExprConstantNumber::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstExprConstantString::AstExprConstantString(const Location& location, const AstArray<char>& value)
: AstExpr(ClassIndex(), location)
, value(value)
{
}
void AstExprConstantString::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstExprLocal::AstExprLocal(const Location& location, AstLocal* local, bool upvalue)
: AstExpr(ClassIndex(), location)
, local(local)
, upvalue(upvalue)
{
}
void AstExprLocal::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstExprGlobal::AstExprGlobal(const Location& location, const AstName& name)
: AstExpr(ClassIndex(), location)
, name(name)
{
}
void AstExprGlobal::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstExprVarargs::AstExprVarargs(const Location& location)
: AstExpr(ClassIndex(), location)
{
}
void AstExprVarargs::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstExprCall::AstExprCall(const Location& location, AstExpr* func, const AstArray<AstExpr*>& args, bool self, const Location& argLocation)
: AstExpr(ClassIndex(), location)
, func(func)
, args(args)
, self(self)
, argLocation(argLocation)
{
}
void AstExprCall::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
func->visit(visitor);
for (AstExpr* arg : args)
arg->visit(visitor);
}
}
AstExprIndexName::AstExprIndexName(
const Location& location, AstExpr* expr, const AstName& index, const Location& indexLocation, const Position& opPosition, char op)
: AstExpr(ClassIndex(), location)
, expr(expr)
, index(index)
, indexLocation(indexLocation)
, opPosition(opPosition)
, op(op)
{
}
void AstExprIndexName::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
expr->visit(visitor);
}
AstExprIndexExpr::AstExprIndexExpr(const Location& location, AstExpr* expr, AstExpr* index)
: AstExpr(ClassIndex(), location)
, expr(expr)
, index(index)
{
}
void AstExprIndexExpr::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
expr->visit(visitor);
index->visit(visitor);
}
}
AstExprFunction::AstExprFunction(const Location& location, const AstArray<AstGenericType>& generics, const AstArray<AstGenericTypePack>& genericPacks,
AstLocal* self, const AstArray<AstLocal*>& args, bool vararg, const Location& varargLocation, AstStatBlock* body, size_t functionDepth,
const AstName& debugname, const std::optional<AstTypeList>& returnAnnotation, AstTypePack* varargAnnotation, bool hasEnd,
const std::optional<Location>& argLocation)
: AstExpr(ClassIndex(), location)
, generics(generics)
, genericPacks(genericPacks)
, self(self)
, args(args)
, returnAnnotation(returnAnnotation)
, vararg(vararg)
, varargLocation(varargLocation)
, varargAnnotation(varargAnnotation)
, body(body)
, functionDepth(functionDepth)
, debugname(debugname)
, hasEnd(hasEnd)
, argLocation(argLocation)
{
}
void AstExprFunction::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstLocal* arg : args)
{
if (arg->annotation)
arg->annotation->visit(visitor);
}
if (varargAnnotation)
varargAnnotation->visit(visitor);
if (returnAnnotation)
visitTypeList(visitor, *returnAnnotation);
body->visit(visitor);
}
}
AstExprTable::AstExprTable(const Location& location, const AstArray<Item>& items)
: AstExpr(ClassIndex(), location)
, items(items)
{
}
void AstExprTable::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (const Item& item : items)
{
if (item.key)
item.key->visit(visitor);
item.value->visit(visitor);
}
}
}
AstExprUnary::AstExprUnary(const Location& location, Op op, AstExpr* expr)
: AstExpr(ClassIndex(), location)
, op(op)
, expr(expr)
{
}
void AstExprUnary::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
expr->visit(visitor);
}
std::string toString(AstExprUnary::Op op)
{
switch (op)
{
case AstExprUnary::Minus:
return "-";
case AstExprUnary::Not:
return "not";
case AstExprUnary::Len:
return "#";
default:
LUAU_ASSERT(false);
return ""; // MSVC requires this even though the switch/case is exhaustive
}
}
AstExprBinary::AstExprBinary(const Location& location, Op op, AstExpr* left, AstExpr* right)
: AstExpr(ClassIndex(), location)
, op(op)
, left(left)
, right(right)
{
}
void AstExprBinary::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
left->visit(visitor);
right->visit(visitor);
}
}
std::string toString(AstExprBinary::Op op)
{
switch (op)
{
case AstExprBinary::Add:
return "+";
case AstExprBinary::Sub:
return "-";
case AstExprBinary::Mul:
return "*";
case AstExprBinary::Div:
return "/";
case AstExprBinary::Mod:
return "%";
case AstExprBinary::Pow:
return "^";
case AstExprBinary::Concat:
return "..";
case AstExprBinary::CompareNe:
return "~=";
case AstExprBinary::CompareEq:
return "==";
case AstExprBinary::CompareLt:
return "<";
case AstExprBinary::CompareLe:
return "<=";
case AstExprBinary::CompareGt:
return ">";
case AstExprBinary::CompareGe:
return ">=";
case AstExprBinary::And:
return "and";
case AstExprBinary::Or:
return "or";
default:
LUAU_ASSERT(false);
return ""; // MSVC requires this even though the switch/case is exhaustive
}
}
AstExprTypeAssertion::AstExprTypeAssertion(const Location& location, AstExpr* expr, AstType* annotation)
: AstExpr(ClassIndex(), location)
, expr(expr)
, annotation(annotation)
{
}
void AstExprTypeAssertion::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
expr->visit(visitor);
annotation->visit(visitor);
}
}
AstExprIfElse::AstExprIfElse(const Location& location, AstExpr* condition, bool hasThen, AstExpr* trueExpr, bool hasElse, AstExpr* falseExpr)
: AstExpr(ClassIndex(), location)
, condition(condition)
, hasThen(hasThen)
, trueExpr(trueExpr)
, hasElse(hasElse)
, falseExpr(falseExpr)
{
}
void AstExprIfElse::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
condition->visit(visitor);
trueExpr->visit(visitor);
falseExpr->visit(visitor);
}
}
AstExprError::AstExprError(const Location& location, const AstArray<AstExpr*>& expressions, unsigned messageIndex)
: AstExpr(ClassIndex(), location)
, expressions(expressions)
, messageIndex(messageIndex)
{
}
AstExprInterpString::AstExprInterpString(const Location& location, const AstArray<AstArray<char>>& strings, const AstArray<AstExpr*>& expressions)
: AstExpr(ClassIndex(), location)
, strings(strings)
, expressions(expressions)
{
}
void AstExprInterpString::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstExpr* expr : expressions)
expr->visit(visitor);
}
}
void AstExprError::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstExpr* expression : expressions)
expression->visit(visitor);
}
}
AstStatBlock::AstStatBlock(const Location& location, const AstArray<AstStat*>& body)
: AstStat(ClassIndex(), location)
, body(body)
{
}
void AstStatBlock::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstStat* stat : body)
stat->visit(visitor);
}
}
AstStatIf::AstStatIf(const Location& location, AstExpr* condition, AstStatBlock* thenbody, AstStat* elsebody,
const std::optional<Location>& thenLocation, const std::optional<Location>& elseLocation, bool hasEnd)
: AstStat(ClassIndex(), location)
, condition(condition)
, thenbody(thenbody)
, elsebody(elsebody)
, thenLocation(thenLocation)
, elseLocation(elseLocation)
, hasEnd(hasEnd)
{
}
void AstStatIf::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
condition->visit(visitor);
thenbody->visit(visitor);
if (elsebody)
elsebody->visit(visitor);
}
}
AstStatWhile::AstStatWhile(const Location& location, AstExpr* condition, AstStatBlock* body, bool hasDo, const Location& doLocation, bool hasEnd)
: AstStat(ClassIndex(), location)
, condition(condition)
, body(body)
, hasDo(hasDo)
, doLocation(doLocation)
, hasEnd(hasEnd)
{
}
void AstStatWhile::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
condition->visit(visitor);
body->visit(visitor);
}
}
AstStatRepeat::AstStatRepeat(const Location& location, AstExpr* condition, AstStatBlock* body, bool hasUntil)
: AstStat(ClassIndex(), location)
, condition(condition)
, body(body)
, hasUntil(hasUntil)
{
}
void AstStatRepeat::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
body->visit(visitor);
condition->visit(visitor);
}
}
AstStatBreak::AstStatBreak(const Location& location)
: AstStat(ClassIndex(), location)
{
}
void AstStatBreak::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstStatContinue::AstStatContinue(const Location& location)
: AstStat(ClassIndex(), location)
{
}
void AstStatContinue::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstStatReturn::AstStatReturn(const Location& location, const AstArray<AstExpr*>& list)
: AstStat(ClassIndex(), location)
, list(list)
{
}
void AstStatReturn::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstExpr* expr : list)
expr->visit(visitor);
}
}
AstStatExpr::AstStatExpr(const Location& location, AstExpr* expr)
: AstStat(ClassIndex(), location)
, expr(expr)
{
}
void AstStatExpr::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
expr->visit(visitor);
}
AstStatLocal::AstStatLocal(
const Location& location, const AstArray<AstLocal*>& vars, const AstArray<AstExpr*>& values, const std::optional<Location>& equalsSignLocation)
: AstStat(ClassIndex(), location)
, vars(vars)
, values(values)
, equalsSignLocation(equalsSignLocation)
{
}
void AstStatLocal::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstLocal* var : vars)
{
if (var->annotation)
var->annotation->visit(visitor);
}
for (AstExpr* expr : values)
expr->visit(visitor);
}
}
AstStatFor::AstStatFor(const Location& location, AstLocal* var, AstExpr* from, AstExpr* to, AstExpr* step, AstStatBlock* body, bool hasDo,
const Location& doLocation, bool hasEnd)
: AstStat(ClassIndex(), location)
, var(var)
, from(from)
, to(to)
, step(step)
, body(body)
, hasDo(hasDo)
, doLocation(doLocation)
, hasEnd(hasEnd)
{
}
void AstStatFor::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
if (var->annotation)
var->annotation->visit(visitor);
from->visit(visitor);
to->visit(visitor);
if (step)
step->visit(visitor);
body->visit(visitor);
}
}
AstStatForIn::AstStatForIn(const Location& location, const AstArray<AstLocal*>& vars, const AstArray<AstExpr*>& values, AstStatBlock* body,
bool hasIn, const Location& inLocation, bool hasDo, const Location& doLocation, bool hasEnd)
: AstStat(ClassIndex(), location)
, vars(vars)
, values(values)
, body(body)
, hasIn(hasIn)
, inLocation(inLocation)
, hasDo(hasDo)
, doLocation(doLocation)
, hasEnd(hasEnd)
{
}
void AstStatForIn::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstLocal* var : vars)
{
if (var->annotation)
var->annotation->visit(visitor);
}
for (AstExpr* expr : values)
expr->visit(visitor);
body->visit(visitor);
}
}
AstStatAssign::AstStatAssign(const Location& location, const AstArray<AstExpr*>& vars, const AstArray<AstExpr*>& values)
: AstStat(ClassIndex(), location)
, vars(vars)
, values(values)
{
}
void AstStatAssign::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstExpr* lvalue : vars)
lvalue->visit(visitor);
for (AstExpr* expr : values)
expr->visit(visitor);
}
}
AstStatCompoundAssign::AstStatCompoundAssign(const Location& location, AstExprBinary::Op op, AstExpr* var, AstExpr* value)
: AstStat(ClassIndex(), location)
, op(op)
, var(var)
, value(value)
{
}
void AstStatCompoundAssign::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
var->visit(visitor);
value->visit(visitor);
}
}
AstStatFunction::AstStatFunction(const Location& location, AstExpr* name, AstExprFunction* func)
: AstStat(ClassIndex(), location)
, name(name)
, func(func)
{
}
void AstStatFunction::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
name->visit(visitor);
func->visit(visitor);
}
}
AstStatLocalFunction::AstStatLocalFunction(const Location& location, AstLocal* name, AstExprFunction* func)
: AstStat(ClassIndex(), location)
, name(name)
, func(func)
{
}
void AstStatLocalFunction::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
func->visit(visitor);
}
AstStatTypeAlias::AstStatTypeAlias(const Location& location, const AstName& name, const Location& nameLocation,
const AstArray<AstGenericType>& generics, const AstArray<AstGenericTypePack>& genericPacks, AstType* type, bool exported)
: AstStat(ClassIndex(), location)
, name(name)
, nameLocation(nameLocation)
, generics(generics)
, genericPacks(genericPacks)
, type(type)
, exported(exported)
{
}
void AstStatTypeAlias::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (const AstGenericType& el : generics)
{
if (el.defaultValue)
el.defaultValue->visit(visitor);
}
for (const AstGenericTypePack& el : genericPacks)
{
if (el.defaultValue)
el.defaultValue->visit(visitor);
}
type->visit(visitor);
}
}
AstStatDeclareGlobal::AstStatDeclareGlobal(const Location& location, const AstName& name, AstType* type)
: AstStat(ClassIndex(), location)
, name(name)
, type(type)
{
}
void AstStatDeclareGlobal::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
type->visit(visitor);
}
AstStatDeclareFunction::AstStatDeclareFunction(const Location& location, const AstName& name, const AstArray<AstGenericType>& generics,
const AstArray<AstGenericTypePack>& genericPacks, const AstTypeList& params, const AstArray<AstArgumentName>& paramNames,
const AstTypeList& retTypes)
: AstStat(ClassIndex(), location)
, name(name)
, generics(generics)
, genericPacks(genericPacks)
, params(params)
, paramNames(paramNames)
, retTypes(retTypes)
{
}
void AstStatDeclareFunction::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
visitTypeList(visitor, params);
visitTypeList(visitor, retTypes);
}
}
AstStatDeclareClass::AstStatDeclareClass(const Location& location, const AstName& name, std::optional<AstName> superName,
const AstArray<AstDeclaredClassProp>& props, AstTableIndexer* indexer)
: AstStat(ClassIndex(), location)
, name(name)
, superName(superName)
, props(props)
, indexer(indexer)
{
}
void AstStatDeclareClass::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (const AstDeclaredClassProp& prop : props)
prop.ty->visit(visitor);
}
}
AstStatError::AstStatError(
const Location& location, const AstArray<AstExpr*>& expressions, const AstArray<AstStat*>& statements, unsigned messageIndex)
: AstStat(ClassIndex(), location)
, expressions(expressions)
, statements(statements)
, messageIndex(messageIndex)
{
}
void AstStatError::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstNode* expression : expressions)
expression->visit(visitor);
for (AstNode* statement : statements)
statement->visit(visitor);
}
}
AstTypeReference::AstTypeReference(const Location& location, std::optional<AstName> prefix, AstName name, std::optional<Location> prefixLocation,
const Location& nameLocation, bool hasParameterList, const AstArray<AstTypeOrPack>& parameters)
: AstType(ClassIndex(), location)
, hasParameterList(hasParameterList)
, prefix(prefix)
, prefixLocation(prefixLocation)
, name(name)
, nameLocation(nameLocation)
, parameters(parameters)
{
}
void AstTypeReference::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (const AstTypeOrPack& param : parameters)
{
if (param.type)
param.type->visit(visitor);
else
param.typePack->visit(visitor);
}
}
}
AstTypeTable::AstTypeTable(const Location& location, const AstArray<AstTableProp>& props, AstTableIndexer* indexer)
: AstType(ClassIndex(), location)
, props(props)
, indexer(indexer)
{
}
void AstTypeTable::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (const AstTableProp& prop : props)
prop.type->visit(visitor);
if (indexer)
{
indexer->indexType->visit(visitor);
indexer->resultType->visit(visitor);
}
}
}
AstTypeFunction::AstTypeFunction(const Location& location, const AstArray<AstGenericType>& generics, const AstArray<AstGenericTypePack>& genericPacks,
const AstTypeList& argTypes, const AstArray<std::optional<AstArgumentName>>& argNames, const AstTypeList& returnTypes)
: AstType(ClassIndex(), location)
, generics(generics)
, genericPacks(genericPacks)
, argTypes(argTypes)
, argNames(argNames)
, returnTypes(returnTypes)
{
LUAU_ASSERT(argNames.size == 0 || argNames.size == argTypes.types.size);
}
void AstTypeFunction::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
visitTypeList(visitor, argTypes);
visitTypeList(visitor, returnTypes);
}
}
AstTypeTypeof::AstTypeTypeof(const Location& location, AstExpr* expr)
: AstType(ClassIndex(), location)
, expr(expr)
{
}
void AstTypeTypeof::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
expr->visit(visitor);
}
AstTypeUnion::AstTypeUnion(const Location& location, const AstArray<AstType*>& types)
: AstType(ClassIndex(), location)
, types(types)
{
}
void AstTypeUnion::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstType* type : types)
type->visit(visitor);
}
}
AstTypeIntersection::AstTypeIntersection(const Location& location, const AstArray<AstType*>& types)
: AstType(ClassIndex(), location)
, types(types)
{
}
void AstTypeIntersection::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstType* type : types)
type->visit(visitor);
}
}
AstTypeSingletonBool::AstTypeSingletonBool(const Location& location, bool value)
: AstType(ClassIndex(), location)
, value(value)
{
}
void AstTypeSingletonBool::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstTypeSingletonString::AstTypeSingletonString(const Location& location, const AstArray<char>& value)
: AstType(ClassIndex(), location)
, value(value)
{
}
void AstTypeSingletonString::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstTypeError::AstTypeError(const Location& location, const AstArray<AstType*>& types, bool isMissing, unsigned messageIndex)
: AstType(ClassIndex(), location)
, types(types)
, isMissing(isMissing)
, messageIndex(messageIndex)
{
}
void AstTypeError::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstType* type : types)
type->visit(visitor);
}
}
AstTypePackExplicit::AstTypePackExplicit(const Location& location, AstTypeList typeList)
: AstTypePack(ClassIndex(), location)
, typeList(typeList)
{
}
void AstTypePackExplicit::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
{
for (AstType* type : typeList.types)
type->visit(visitor);
if (typeList.tailType)
typeList.tailType->visit(visitor);
}
}
AstTypePackVariadic::AstTypePackVariadic(const Location& location, AstType* variadicType)
: AstTypePack(ClassIndex(), location)
, variadicType(variadicType)
{
}
void AstTypePackVariadic::visit(AstVisitor* visitor)
{
if (visitor->visit(this))
variadicType->visit(visitor);
}
AstTypePackGeneric::AstTypePackGeneric(const Location& location, AstName name)
: AstTypePack(ClassIndex(), location)
, genericName(name)
{
}
void AstTypePackGeneric::visit(AstVisitor* visitor)
{
visitor->visit(this);
}
AstName getIdentifier(AstExpr* node)
{
if (AstExprGlobal* expr = node->as<AstExprGlobal>())
return expr->name;
if (AstExprLocal* expr = node->as<AstExprLocal>())
return expr->local->name;
return AstName();
}
Location getLocation(const AstTypeList& typeList)
{
Location result;
if (typeList.types.size)
{
result = Location{typeList.types.data[0]->location, typeList.types.data[typeList.types.size - 1]->location};
}
if (typeList.tailType)
result.end = typeList.tailType->location.end;
return result;
}
} // namespace Luau
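The visit definitions above implement a conventional double-dispatch visitor: each node forwards itself to an AstVisitor overload and descends into its children only when that call returns true. A sketch of a concrete visitor (AstVisitor itself is declared in Luau/Ast.h, not shown in this excerpt):

#include "Luau/Ast.h"

// Counts call expressions in a parsed chunk.
struct CallCounter : Luau::AstVisitor
{
    size_t count = 0;

    bool visit(Luau::AstExprCall*) override
    {
        ++count;
        return true; // keep descending into the callee and arguments
    }
};

// Usage sketch: given an AstStatBlock* root from the parser,
//   CallCounter counter;
//   root->visit(&counter);
//   // counter.count now holds the number of call expressions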

File diff suppressed because it is too large

File diff suppressed because it is too large

Location.cpp

@@ -1,131 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Location.h"
namespace Luau
{
Position::Position(unsigned int line, unsigned int column)
: line(line)
, column(column)
{
}
bool Position::operator==(const Position& rhs) const
{
return this->column == rhs.column && this->line == rhs.line;
}
bool Position::operator!=(const Position& rhs) const
{
return !(*this == rhs);
}
bool Position::operator<(const Position& rhs) const
{
if (line == rhs.line)
return column < rhs.column;
else
return line < rhs.line;
}
bool Position::operator>(const Position& rhs) const
{
if (line == rhs.line)
return column > rhs.column;
else
return line > rhs.line;
}
bool Position::operator<=(const Position& rhs) const
{
return *this == rhs || *this < rhs;
}
bool Position::operator>=(const Position& rhs) const
{
return *this == rhs || *this > rhs;
}
void Position::shift(const Position& start, const Position& oldEnd, const Position& newEnd)
{
if (*this >= start)
{
if (this->line > oldEnd.line)
this->line += (newEnd.line - oldEnd.line);
else
{
this->line = newEnd.line;
this->column += (newEnd.column - oldEnd.column);
}
}
}
Location::Location()
: begin(0, 0)
, end(0, 0)
{
}
Location::Location(const Position& begin, const Position& end)
: begin(begin)
, end(end)
{
}
Location::Location(const Position& begin, unsigned int length)
: begin(begin)
, end(begin.line, begin.column + length)
{
}
Location::Location(const Location& begin, const Location& end)
: begin(begin.begin)
, end(end.end)
{
}
bool Location::operator==(const Location& rhs) const
{
return this->begin == rhs.begin && this->end == rhs.end;
}
bool Location::operator!=(const Location& rhs) const
{
return !(*this == rhs);
}
bool Location::encloses(const Location& l) const
{
return begin <= l.begin && end >= l.end;
}
bool Location::overlaps(const Location& l) const
{
return (begin <= l.begin && end >= l.begin) || (begin <= l.end && end >= l.end) || (begin >= l.begin && end <= l.end);
}
bool Location::contains(const Position& p) const
{
return begin <= p && p < end;
}
bool Location::containsClosed(const Position& p) const
{
return begin <= p && p <= end;
}
void Location::extend(const Location& other)
{
if (other.begin < begin)
begin = other.begin;
if (other.end > end)
end = other.end;
}
void Location::shift(const Position& start, const Position& oldEnd, const Position& newEnd)
{
begin.shift(start, oldEnd, newEnd);
end.shift(start, oldEnd, newEnd);
}
} // namespace Luau
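A worked sketch of the shift logic above: after an edit that starts at (1,0) and grows from an old end of (3,0) to a new end of (4,0), positions on later lines move down one line while their columns are preserved.

#include "Luau/Location.h"
#include <cassert>

int main()
{
    Luau::Position p(5, 2); // a position well past the edited range
    p.shift(Luau::Position(1, 0), Luau::Position(3, 0), Luau::Position(4, 0));
    assert(p == Luau::Position(6, 2)); // line moved down by one, column untouched
    return 0;
}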

File diff suppressed because it is too large

StringUtils.cpp

@@ -1,297 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/StringUtils.h"
#include "Luau/Common.h"
#include <array>
#include <vector>
#include <string>
#include <string.h>
#include <stdint.h>
namespace Luau
{
void vformatAppend(std::string& ret, const char* fmt, va_list args)
{
va_list argscopy;
va_copy(argscopy, args);
#ifdef _MSC_VER
int actualSize = _vscprintf(fmt, argscopy);
#else
int actualSize = vsnprintf(NULL, 0, fmt, argscopy);
#endif
va_end(argscopy);
if (actualSize <= 0)
return;
size_t sz = ret.size();
ret.resize(sz + actualSize);
vsnprintf(&ret[0] + sz, actualSize + 1, fmt, args);
}
std::string format(const char* fmt, ...)
{
std::string result;
va_list args;
va_start(args, fmt);
vformatAppend(result, fmt, args);
va_end(args);
return result;
}
void formatAppend(std::string& str, const char* fmt, ...)
{
va_list args;
va_start(args, fmt);
vformatAppend(str, fmt, args);
va_end(args);
}
std::string vformat(const char* fmt, va_list args)
{
std::string ret;
vformatAppend(ret, fmt, args);
return ret;
}
template<typename String>
static std::string joinImpl(const std::vector<String>& segments, std::string_view delimiter)
{
if (segments.empty())
return "";
size_t len = (segments.size() - 1) * delimiter.size();
for (const auto& sv : segments)
len += sv.size();
std::string result;
result.resize(len);
char* dest = const_cast<char*>(result.data()); // This const_cast is only necessary until C++17
auto it = segments.begin();
memcpy(dest, it->data(), it->size());
dest += it->size();
++it;
for (; it != segments.end(); ++it)
{
memcpy(dest, delimiter.data(), delimiter.size());
dest += delimiter.size();
memcpy(dest, it->data(), it->size());
dest += it->size();
}
LUAU_ASSERT(dest == result.data() + len);
return result;
}
std::string join(const std::vector<std::string_view>& segments, std::string_view delimiter)
{
return joinImpl(segments, delimiter);
}
std::string join(const std::vector<std::string>& segments, std::string_view delimiter)
{
return joinImpl(segments, delimiter);
}
std::vector<std::string_view> split(std::string_view s, char delimiter)
{
std::vector<std::string_view> result;
while (!s.empty())
{
auto index = s.find(delimiter);
if (index == std::string::npos)
{
result.push_back(s);
break;
}
result.push_back(s.substr(0, index));
s.remove_prefix(index + 1);
}
return result;
}
size_t editDistance(std::string_view a, std::string_view b)
{
// A matching prefix and suffix contribute zero cost to the edit distance, so we drop those characters up front.
while (!a.empty() && !b.empty() && a.front() == b.front())
{
a.remove_prefix(1);
b.remove_prefix(1);
}
while (!a.empty() && !b.empty() && a.back() == b.back())
{
a.remove_suffix(1);
b.remove_suffix(1);
}
// If either string is empty after trimming, the edit distance is simply the remaining length of the other string,
// so we can return it immediately without running the rest of the algorithm.
if (a.empty())
return b.size();
if (b.empty())
return a.size();
size_t maxDistance = a.size() + b.size();
std::vector<size_t> distances((a.size() + 2) * (b.size() + 2), 0);
auto getPos = [b](size_t x, size_t y) -> size_t {
return (x * (b.size() + 2)) + y;
};
distances[0] = maxDistance;
for (size_t x = 0; x <= a.size(); ++x)
{
distances[getPos(x + 1, 0)] = maxDistance;
distances[getPos(x + 1, 1)] = x;
}
for (size_t y = 0; y <= b.size(); ++y)
{
distances[getPos(0, y + 1)] = maxDistance;
distances[getPos(1, y + 1)] = y;
}
std::array<size_t, 256> seenCharToRow;
seenCharToRow.fill(0);
for (size_t x = 1; x <= a.size(); ++x)
{
size_t lastMatchedY = 0;
for (size_t y = 1; y <= b.size(); ++y)
{
// The value of b[N] can be negative with unicode characters
unsigned char bSeenCharIndex = static_cast<unsigned char>(b[y - 1]);
size_t x1 = seenCharToRow[bSeenCharIndex];
size_t y1 = lastMatchedY;
size_t cost = 1;
if (a[x - 1] == b[y - 1])
{
cost = 0;
lastMatchedY = y;
}
size_t transposition = distances[getPos(x1, y1)] + (x - x1 - 1) + 1 + (y - y1 - 1);
size_t substitution = distances[getPos(x, y)] + cost;
size_t insertion = distances[getPos(x, y + 1)] + 1;
size_t deletion = distances[getPos(x + 1, y)] + 1;
// It's more performant to use std::min(size_t, size_t) rather than the initializer_list overload.
// Until proven otherwise, please do not change this.
distances[getPos(x + 1, y + 1)] = std::min(std::min(insertion, deletion), std::min(substitution, transposition));
}
// The value of a[N] can be negative with unicode characters
unsigned char aSeenCharIndex = static_cast<unsigned char>(a[x - 1]);
seenCharToRow[aSeenCharIndex] = x;
}
return distances[getPos(a.size() + 1, b.size() + 1)];
}
bool startsWith(std::string_view haystack, std::string_view needle)
{
// ::starts_with is C++20
return haystack.size() >= needle.size() && haystack.substr(0, needle.size()) == needle;
}
bool equalsLower(std::string_view lhs, std::string_view rhs)
{
if (lhs.size() != rhs.size())
return false;
for (size_t i = 0; i < lhs.size(); ++i)
if (tolower(uint8_t(lhs[i])) != tolower(uint8_t(rhs[i])))
return false;
return true;
}
size_t hashRange(const char* data, size_t size)
{
// FNV-1a
uint32_t hash = 2166136261;
for (size_t i = 0; i < size; ++i)
{
hash ^= uint8_t(data[i]);
hash *= 16777619;
}
return hash;
}
bool isIdentifier(std::string_view s)
{
return (s.find_first_not_of("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890_") == std::string::npos);
}
std::string escape(std::string_view s, bool escapeForInterpString)
{
std::string r;
r.reserve(s.size() + 50); // arbitrary number to guess how many characters we'll be inserting
for (uint8_t c : s)
{
if (c >= ' ' && c != '\\' && c != '\'' && c != '\"' && c != '`' && c != '{')
r += c;
else
{
r += '\\';
if (escapeForInterpString && (c == '`' || c == '{'))
{
r += c;
continue;
}
switch (c)
{
case '\a':
r += 'a';
break;
case '\b':
r += 'b';
break;
case '\f':
r += 'f';
break;
case '\n':
r += 'n';
break;
case '\r':
r += 'r';
break;
case '\t':
r += 't';
break;
case '\v':
r += 'v';
break;
case '\'':
r += '\'';
break;
case '\"':
r += '\"';
break;
case '\\':
r += '\\';
break;
default:
Luau::formatAppend(r, "%03u", c);
}
}
}
return r;
}
} // namespace Luau
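A short sketch pinning down escape's behavior as implemented above: named escapes for common control characters, and a plain backslash escape for backticks and braces when escaping for interpolated strings.

#include "Luau/StringUtils.h"
#include <cassert>

int main()
{
    assert(Luau::escape("a\nb") == "a\\nb");                                // newline becomes \n
    assert(Luau::escape("`{", /*escapeForInterpString=*/true) == "\\`\\{"); // interp-string delimiters
    return 0;
}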

TimeTrace.cpp

@@ -1,269 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/TimeTrace.h"
#include "Luau/StringUtils.h"
#include <mutex>
#include <string>
#include <stdlib.h>
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <Windows.h>
#endif
#ifdef __APPLE__
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif
#include <time.h>
LUAU_FASTFLAGVARIABLE(DebugLuauTimeTracing, false)
namespace Luau
{
namespace TimeTrace
{
static double getClockPeriod()
{
#if defined(_WIN32)
LARGE_INTEGER result = {};
QueryPerformanceFrequency(&result);
return 1.0 / double(result.QuadPart);
#elif defined(__APPLE__)
mach_timebase_info_data_t result = {};
mach_timebase_info(&result);
return double(result.numer) / double(result.denom) * 1e-9;
#elif defined(__linux__)
return 1e-9;
#else
return 1.0 / double(CLOCKS_PER_SEC);
#endif
}
static double getClockTimestamp()
{
#if defined(_WIN32)
LARGE_INTEGER result = {};
QueryPerformanceCounter(&result);
return double(result.QuadPart);
#elif defined(__APPLE__)
return double(mach_absolute_time());
#elif defined(__linux__)
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return now.tv_sec * 1e9 + now.tv_nsec;
#else
return double(clock());
#endif
}
double getClock()
{
static double period = getClockPeriod();
static double start = getClockTimestamp();
return (getClockTimestamp() - start) * period;
}
uint32_t getClockMicroseconds()
{
static double period = getClockPeriod() * 1e6;
static double start = getClockTimestamp();
return uint32_t((getClockTimestamp() - start) * period);
}
} // namespace TimeTrace
} // namespace Luau
#if defined(LUAU_ENABLE_TIME_TRACE)
namespace Luau
{
namespace TimeTrace
{
struct GlobalContext
{
GlobalContext() = default;
~GlobalContext()
{
// Ideally we would want all ThreadContext destructors to run
// But in VS, not all thread_local object instances are destroyed
for (ThreadContext* context : threads)
{
if (!context->events.empty())
context->flushEvents();
}
if (traceFile)
fclose(traceFile);
}
std::mutex mutex;
std::vector<ThreadContext*> threads;
uint32_t nextThreadId = 0;
std::vector<Token> tokens;
FILE* traceFile = nullptr;
};
GlobalContext& getGlobalContext()
{
static GlobalContext context;
return context;
}
uint16_t createToken(GlobalContext& context, const char* name, const char* category)
{
std::scoped_lock lock(context.mutex);
LUAU_ASSERT(context.tokens.size() < 64 * 1024);
context.tokens.push_back({name, category});
return uint16_t(context.tokens.size() - 1);
}
uint32_t createThread(GlobalContext& context, ThreadContext* threadContext)
{
std::scoped_lock lock(context.mutex);
context.threads.push_back(threadContext);
return ++context.nextThreadId;
}
void releaseThread(GlobalContext& context, ThreadContext* threadContext)
{
std::scoped_lock lock(context.mutex);
if (auto it = std::find(context.threads.begin(), context.threads.end(), threadContext); it != context.threads.end())
context.threads.erase(it);
}
void flushEvents(GlobalContext& context, uint32_t threadId, const std::vector<Event>& events, const std::vector<char>& data)
{
std::scoped_lock lock(context.mutex);
if (!context.traceFile)
{
context.traceFile = fopen("trace.json", "w");
if (!context.traceFile)
return;
fprintf(context.traceFile, "[\n");
}
std::string temp;
const unsigned tempReserve = 64 * 1024;
temp.reserve(tempReserve);
const char* rawData = data.data();
// Formatting state
bool unfinishedEnter = false;
bool unfinishedArgs = false;
for (const Event& ev : events)
{
switch (ev.type)
{
case EventType::Enter:
{
if (unfinishedArgs)
{
formatAppend(temp, "}");
unfinishedArgs = false;
}
if (unfinishedEnter)
{
formatAppend(temp, "},\n");
unfinishedEnter = false;
}
Token& token = context.tokens[ev.token];
formatAppend(temp, R"({"name": "%s", "cat": "%s", "ph": "B", "ts": %u, "pid": 0, "tid": %u)", token.name, token.category,
ev.data.microsec, threadId);
unfinishedEnter = true;
}
break;
case EventType::Leave:
if (unfinishedArgs)
{
formatAppend(temp, "}");
unfinishedArgs = false;
}
if (unfinishedEnter)
{
formatAppend(temp, "},\n");
unfinishedEnter = false;
}
formatAppend(temp,
R"({"ph": "E", "ts": %u, "pid": 0, "tid": %u},)"
"\n",
ev.data.microsec, threadId);
break;
case EventType::ArgName:
LUAU_ASSERT(unfinishedEnter);
if (!unfinishedArgs)
{
formatAppend(temp, R"(, "args": { "%s": )", rawData + ev.data.dataPos);
unfinishedArgs = true;
}
else
{
formatAppend(temp, R"(, "%s": )", rawData + ev.data.dataPos);
}
break;
case EventType::ArgValue:
LUAU_ASSERT(unfinishedArgs);
formatAppend(temp, R"("%s")", rawData + ev.data.dataPos);
break;
}
// Don't want to hit the string capacity and reallocate
if (temp.size() > tempReserve - 1024)
{
fwrite(temp.data(), 1, temp.size(), context.traceFile);
temp.clear();
}
}
if (unfinishedArgs)
{
formatAppend(temp, "}");
unfinishedArgs = false;
}
if (unfinishedEnter)
{
formatAppend(temp, "},\n");
unfinishedEnter = false;
}
fwrite(temp.data(), 1, temp.size(), context.traceFile);
fflush(context.traceFile);
}
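// The emitted file follows the Chrome trace event format ("ph" begin/end pairs
// with microsecond timestamps), so trace.json can be loaded into a viewer such
// as chrome://tracing. An illustrative fragment of the output (names, ids, and
// timestamps here are made up):
//   {"name": "Compile", "cat": "Frontend", "ph": "B", "ts": 12, "pid": 0, "tid": 1, "args": { "module": "main" }},
//   {"ph": "E", "ts": 340, "pid": 0, "tid": 1},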
ThreadContext& getThreadContext()
{
thread_local ThreadContext context;
return context;
}
uint16_t createScopeData(const char* name, const char* category)
{
return createToken(Luau::TimeTrace::getGlobalContext(), name, category);
}
} // namespace TimeTrace
} // namespace Luau
#endif

@ -1,59 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/RegisterA64.h"
#include <stddef.h>
namespace Luau
{
namespace CodeGen
{
namespace A64
{
enum class AddressKindA64 : uint8_t
{
reg, // reg + reg
imm, // reg + imm
pre, // reg + imm, reg += imm
post, // reg, reg += imm
};
struct AddressA64
{
// This is a little misleading since AddressA64 can encode offsets up to 1023*size where size depends on the load/store size
// For example, ldr x0, [reg+imm] is limited to 8 KB offsets assuming imm is divisible by 8, but loading into w0 reduces the range to 4 KB
static constexpr size_t kMaxOffset = 1023;
constexpr AddressA64(RegisterA64 base, int off = 0, AddressKindA64 kind = AddressKindA64::imm)
: kind(kind)
, base(base)
, offset(xzr)
, data(off)
{
LUAU_ASSERT(base.kind == KindA64::x || base == sp);
LUAU_ASSERT(kind != AddressKindA64::reg);
}
constexpr AddressA64(RegisterA64 base, RegisterA64 offset)
: kind(AddressKindA64::reg)
, base(base)
, offset(offset)
, data(0)
{
LUAU_ASSERT(base.kind == KindA64::x);
LUAU_ASSERT(offset.kind == KindA64::x);
}
AddressKindA64 kind;
RegisterA64 base;
RegisterA64 offset;
int data;
};
using mem = AddressA64;
} // namespace A64
} // namespace CodeGen
} // namespace Luau

@ -1,282 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/RegisterA64.h"
#include "Luau/AddressA64.h"
#include "Luau/ConditionA64.h"
#include "Luau/Label.h"
#include <string>
#include <vector>
namespace Luau
{
namespace CodeGen
{
namespace A64
{
enum FeaturesA64
{
Feature_JSCVT = 1 << 0,
};
class AssemblyBuilderA64
{
public:
explicit AssemblyBuilderA64(bool logText, unsigned int features = 0);
~AssemblyBuilderA64();
// Moves
void mov(RegisterA64 dst, RegisterA64 src);
void mov(RegisterA64 dst, int src); // macro
// Moves of 32-bit immediates get decomposed into one or more of these
void movz(RegisterA64 dst, uint16_t src, int shift = 0);
void movn(RegisterA64 dst, uint16_t src, int shift = 0);
void movk(RegisterA64 dst, uint16_t src, int shift = 0);
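// For instance (illustrative), synthesizing the 32-bit constant 0x12345678
// takes two of these, each carrying a 16-bit immediate at a chosen shift:
//   movz w0, #0x5678            ; w0 = 0x00005678
//   movk w0, #0x1234, lsl #16   ; w0 = 0x12345678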
// Arithmetics
void add(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void add(RegisterA64 dst, RegisterA64 src1, uint16_t src2);
void sub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void sub(RegisterA64 dst, RegisterA64 src1, uint16_t src2);
void neg(RegisterA64 dst, RegisterA64 src);
// Comparisons
// Note: some arithmetic instructions also have versions that update flags (ADDS etc) but we aren't using them atm
void cmp(RegisterA64 src1, RegisterA64 src2);
void cmp(RegisterA64 src1, uint16_t src2);
void csel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond);
void cset(RegisterA64 dst, ConditionA64 cond);
// Bitwise
void and_(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void orr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void bic(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void tst(RegisterA64 src1, RegisterA64 src2, int shift = 0);
void mvn_(RegisterA64 dst, RegisterA64 src);
// Bitwise with immediate
// Note: immediate must have a single contiguous sequence of 1 bits set of length 1..31
void and_(RegisterA64 dst, RegisterA64 src1, uint32_t src2);
void orr(RegisterA64 dst, RegisterA64 src1, uint32_t src2);
void eor(RegisterA64 dst, RegisterA64 src1, uint32_t src2);
void tst(RegisterA64 src1, uint32_t src2);
// Shifts
void lsl(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void lsr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void asr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void ror(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void clz(RegisterA64 dst, RegisterA64 src);
void rbit(RegisterA64 dst, RegisterA64 src);
// Shifts with immediates
// Note: immediate value must be in [0, 31] or [0, 63] range based on register type
void lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
void lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
void asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
void ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
// Bitfields
void ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);
void ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);
void sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);
void sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);
// Load
// Note: paired loads are currently omitted for simplicity
void ldr(RegisterA64 dst, AddressA64 src);
void ldrb(RegisterA64 dst, AddressA64 src);
void ldrh(RegisterA64 dst, AddressA64 src);
void ldrsb(RegisterA64 dst, AddressA64 src);
void ldrsh(RegisterA64 dst, AddressA64 src);
void ldrsw(RegisterA64 dst, AddressA64 src);
void ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src);
// Store
void str(RegisterA64 src, AddressA64 dst);
void strb(RegisterA64 src, AddressA64 dst);
void strh(RegisterA64 src, AddressA64 dst);
void stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst);
// Control flow
void b(Label& label);
void bl(Label& label);
void br(RegisterA64 src);
void blr(RegisterA64 src);
void ret();
// Conditional control flow
void b(ConditionA64 cond, Label& label);
void cbz(RegisterA64 src, Label& label);
void cbnz(RegisterA64 src, Label& label);
void tbz(RegisterA64 src, uint8_t bit, Label& label);
void tbnz(RegisterA64 src, uint8_t bit, Label& label);
// Address of embedded data
void adr(RegisterA64 dst, const void* ptr, size_t size);
void adr(RegisterA64 dst, uint64_t value);
void adr(RegisterA64 dst, double value);
// Address of code (label)
void adr(RegisterA64 dst, Label& label);
// Floating-point scalar moves
// Note: constant must be compatible with immediate floating point moves (see isFmovSupported)
void fmov(RegisterA64 dst, RegisterA64 src);
void fmov(RegisterA64 dst, double src);
// Floating-point scalar math
void fabs(RegisterA64 dst, RegisterA64 src);
void fadd(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void fdiv(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void fmul(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void fneg(RegisterA64 dst, RegisterA64 src);
void fsqrt(RegisterA64 dst, RegisterA64 src);
void fsub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
// Floating-point rounding and conversions
void frinta(RegisterA64 dst, RegisterA64 src);
void frintm(RegisterA64 dst, RegisterA64 src);
void frintp(RegisterA64 dst, RegisterA64 src);
void fcvt(RegisterA64 dst, RegisterA64 src);
void fcvtzs(RegisterA64 dst, RegisterA64 src);
void fcvtzu(RegisterA64 dst, RegisterA64 src);
void scvtf(RegisterA64 dst, RegisterA64 src);
void ucvtf(RegisterA64 dst, RegisterA64 src);
// Floating-point conversion to integer using JS rules (wrap around 2^32) and set Z flag
// note: this is part of ARM8.3 (JSCVT feature); support of this instruction needs to be checked at runtime
void fjcvtzs(RegisterA64 dst, RegisterA64 src);
// Floating-point comparisons
void fcmp(RegisterA64 src1, RegisterA64 src2);
void fcmpz(RegisterA64 src);
void fcsel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond);
void udf();
// Run final checks
bool finalize();
// Places a label at current location and returns it
Label setLabel();
// Assigns label position to the current location
void setLabel(Label& label);
// Extracts code offset (in bytes) from label
uint32_t getLabelOffset(const Label& label)
{
LUAU_ASSERT(label.location != ~0u);
return label.location * 4;
}
void logAppend(const char* fmt, ...) LUAU_PRINTF_ATTR(2, 3);
uint32_t getCodeSize() const;
// Resulting data and code that need to be copied over one after the other
// The *end* of 'data' has to be aligned to 16 bytes; this will also align 'code'
std::vector<uint8_t> data;
std::vector<uint32_t> code;
std::string text;
const bool logText = false;
const unsigned int features = 0;
// Maximum immediate argument to functions like add/sub/cmp
static constexpr size_t kMaxImmediate = (1 << 12) - 1;
// Check if immediate mode mask is supported for bitwise operations (and/or/xor)
static bool isMaskSupported(uint32_t mask);
// Check if fmov can be used to synthesize a constant
static bool isFmovSupported(double value);
private:
// Instruction archetypes
void place0(const char* name, uint32_t word);
void placeSR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, int shift = 0, int N = 0);
void placeSR2(const char* name, RegisterA64 dst, RegisterA64 src, uint8_t op, uint8_t op2 = 0);
void placeR3(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, uint8_t op, uint8_t op2);
void placeR1(const char* name, RegisterA64 dst, RegisterA64 src, uint32_t op);
void placeI12(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op);
void placeI16(const char* name, RegisterA64 dst, int src, uint8_t op, int shift = 0);
void placeA(const char* name, RegisterA64 dst, AddressA64 src, uint16_t opsize, int sizelog);
void placeB(const char* name, Label& label, uint8_t op);
void placeBC(const char* name, Label& label, uint8_t op, uint8_t cond);
void placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond);
void placeBR(const char* name, RegisterA64 src, uint32_t op);
void placeBTR(const char* name, Label& label, uint8_t op, RegisterA64 cond, uint8_t bit);
void placeADR(const char* name, RegisterA64 src, uint8_t op);
void placeADR(const char* name, RegisterA64 src, uint8_t op, Label& label);
void placeP(const char* name, RegisterA64 dst1, RegisterA64 dst2, AddressA64 src, uint8_t op, uint8_t opc, int sizelog);
void placeCS(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond, uint8_t op, uint8_t opc, int invert = 0);
void placeFCMP(const char* name, RegisterA64 src1, RegisterA64 src2, uint8_t op, uint8_t opc);
void placeFMOV(const char* name, RegisterA64 dst, double src, uint32_t op);
void placeBM(const char* name, RegisterA64 dst, RegisterA64 src1, uint32_t src2, uint8_t op);
void placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op, int immr, int imms);
void place(uint32_t word);
struct Patch
{
enum Kind
{
Imm26,
Imm19,
Imm14,
};
Kind kind : 2;
uint32_t label : 30;
uint32_t location;
};
void patchLabel(Label& label, Patch::Kind kind);
void patchOffset(uint32_t location, int value, Patch::Kind kind);
void commit();
LUAU_NOINLINE void extend();
// Data
size_t allocateData(size_t size, size_t align);
// Logging of assembly in text form
LUAU_NOINLINE void log(const char* opcode);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src1, int src2);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, int src, int shift = 0);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, double src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, AddressA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst1, RegisterA64 dst2, AddressA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 src, Label label, int imm = -1);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 src);
LUAU_NOINLINE void log(const char* opcode, Label label);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond);
LUAU_NOINLINE void log(Label label);
LUAU_NOINLINE void log(RegisterA64 reg);
LUAU_NOINLINE void log(AddressA64 addr);
uint32_t nextLabel = 1;
std::vector<Patch> pendingLabels;
std::vector<uint32_t> labelLocations;
bool finalized = false;
bool overflowed = false;
size_t dataPos = 0;
uint32_t* codePos = nullptr;
uint32_t* codeEnd = nullptr;
};
} // namespace A64
} // namespace CodeGen
} // namespace Luau

@ -1,270 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include "Luau/DenseHash.h"
#include "Luau/Label.h"
#include "Luau/ConditionX64.h"
#include "Luau/OperandX64.h"
#include "Luau/RegisterX64.h"
#include <string>
#include <vector>
namespace Luau
{
namespace CodeGen
{
namespace X64
{
enum class RoundingModeX64
{
RoundToNearestEven = 0b00,
RoundToNegativeInfinity = 0b01,
RoundToPositiveInfinity = 0b10,
RoundToZero = 0b11,
};
enum class AlignmentDataX64
{
Nop,
Int3,
Ud2, // int3 will be used as a fall-back if it doesn't fit
};
enum class ABIX64
{
Windows,
SystemV,
};
class AssemblyBuilderX64
{
public:
explicit AssemblyBuilderX64(bool logText, ABIX64 abi);
explicit AssemblyBuilderX64(bool logText);
~AssemblyBuilderX64();
// Base two operand instructions with 9 opcode selection
void add(OperandX64 lhs, OperandX64 rhs);
void sub(OperandX64 lhs, OperandX64 rhs);
void cmp(OperandX64 lhs, OperandX64 rhs);
void and_(OperandX64 lhs, OperandX64 rhs);
void or_(OperandX64 lhs, OperandX64 rhs);
void xor_(OperandX64 lhs, OperandX64 rhs);
// Binary shift instructions with special rhs handling
void sal(OperandX64 lhs, OperandX64 rhs);
void sar(OperandX64 lhs, OperandX64 rhs);
void shl(OperandX64 lhs, OperandX64 rhs);
void shr(OperandX64 lhs, OperandX64 rhs);
void rol(OperandX64 lhs, OperandX64 rhs);
void ror(OperandX64 lhs, OperandX64 rhs);
// Two operand mov instruction has additional specialized encodings
void mov(OperandX64 lhs, OperandX64 rhs);
void mov64(RegisterX64 lhs, int64_t imm);
void movsx(RegisterX64 lhs, OperandX64 rhs);
void movzx(RegisterX64 lhs, OperandX64 rhs);
// Base one operand instruction with 2 opcode selection
void div(OperandX64 op);
void idiv(OperandX64 op);
void mul(OperandX64 op);
void imul(OperandX64 op);
void neg(OperandX64 op);
void not_(OperandX64 op);
void dec(OperandX64 op);
void inc(OperandX64 op);
// Additional forms of imul
void imul(OperandX64 lhs, OperandX64 rhs);
void imul(OperandX64 dst, OperandX64 lhs, int32_t rhs);
void test(OperandX64 lhs, OperandX64 rhs);
void lea(OperandX64 lhs, OperandX64 rhs);
void setcc(ConditionX64 cond, OperandX64 op);
void push(OperandX64 op);
void pop(OperandX64 op);
void ret();
// Control flow
void jcc(ConditionX64 cond, Label& label);
void jmp(Label& label);
void jmp(OperandX64 op);
void call(Label& label);
void call(OperandX64 op);
void lea(RegisterX64 lhs, Label& label);
void int3();
void ud2();
void bsr(RegisterX64 dst, OperandX64 src);
void bsf(RegisterX64 dst, OperandX64 src);
// Code alignment
void nop(uint32_t length = 1);
void align(uint32_t alignment, AlignmentDataX64 data = AlignmentDataX64::Nop);
// AVX
void vaddpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vaddps(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vaddsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vaddss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vsubsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmulsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vdivsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vandpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vandnpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vxorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vorpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vucomisd(OperandX64 src1, OperandX64 src2);
void vcvttsd2si(OperandX64 dst, OperandX64 src);
void vcvtsi2sd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vcvtsd2ss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vroundsd(OperandX64 dst, OperandX64 src1, OperandX64 src2, RoundingModeX64 roundingMode); // inexact
void vsqrtpd(OperandX64 dst, OperandX64 src);
void vsqrtps(OperandX64 dst, OperandX64 src);
void vsqrtsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vsqrtss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmovsd(OperandX64 dst, OperandX64 src);
void vmovsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmovss(OperandX64 dst, OperandX64 src);
void vmovss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmovapd(OperandX64 dst, OperandX64 src);
void vmovaps(OperandX64 dst, OperandX64 src);
void vmovupd(OperandX64 dst, OperandX64 src);
void vmovups(OperandX64 dst, OperandX64 src);
void vmovq(OperandX64 lhs, OperandX64 rhs);
void vmaxsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vminsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vcmpltsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vblendvpd(RegisterX64 dst, RegisterX64 src1, OperandX64 mask, RegisterX64 src3);
// Run final checks
bool finalize();
// Places a label at current location and returns it
Label setLabel();
// Assigns label position to the current location
void setLabel(Label& label);
// Extracts code offset (in bytes) from label
uint32_t getLabelOffset(const Label& label)
{
LUAU_ASSERT(label.location != ~0u);
return label.location;
}
// Constant allocation (uses rip-relative addressing)
OperandX64 i64(int64_t value);
OperandX64 f32(float value);
OperandX64 f64(double value);
OperandX64 f32x4(float x, float y, float z, float w);
OperandX64 f64x2(double x, double y);
OperandX64 bytes(const void* ptr, size_t size, size_t align = 8);
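// A usage sketch (hypothetical builder instance): the returned operand is a
// rip-relative memory reference into 'data', e.g.
//   build.vmulsd(xmm0, xmm0, build.f64(0.5));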
void logAppend(const char* fmt, ...) LUAU_PRINTF_ATTR(2, 3);
uint32_t getCodeSize() const;
// Resulting data and code that need to be copied over one after the other
// The *end* of 'data' has to be aligned to 16 bytes; this will also align 'code'
std::vector<uint8_t> data;
std::vector<uint8_t> code;
std::string text;
const bool logText = false;
const ABIX64 abi;
private:
// Instruction archetypes
void placeBinary(const char* name, OperandX64 lhs, OperandX64 rhs, uint8_t codeimm8, uint8_t codeimm, uint8_t codeimmImm8, uint8_t code8rev,
uint8_t coderev, uint8_t code8, uint8_t code, uint8_t opreg);
void placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code, uint8_t codeImm8, uint8_t opreg);
void placeBinaryRegAndRegMem(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code);
void placeBinaryRegMemAndReg(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code);
void placeUnaryModRegMem(const char* name, OperandX64 op, uint8_t code8, uint8_t code, uint8_t opreg);
void placeShift(const char* name, OperandX64 lhs, OperandX64 rhs, uint8_t opreg);
void placeJcc(const char* name, Label& label, uint8_t cc);
void placeAvx(const char* name, OperandX64 dst, OperandX64 src, uint8_t code, bool setW, uint8_t mode, uint8_t prefix);
void placeAvx(const char* name, OperandX64 dst, OperandX64 src, uint8_t code, uint8_t coderev, bool setW, uint8_t mode, uint8_t prefix);
void placeAvx(const char* name, OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t code, bool setW, uint8_t mode, uint8_t prefix);
void placeAvx(
const char* name, OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t imm8, uint8_t code, bool setW, uint8_t mode, uint8_t prefix);
// Instruction components
void placeRegAndModRegMem(OperandX64 lhs, OperandX64 rhs, int32_t extraCodeBytes = 0);
void placeModRegMem(OperandX64 rhs, uint8_t regop, int32_t extraCodeBytes = 0);
void placeRex(RegisterX64 op);
void placeRex(OperandX64 op);
void placeRexNoW(OperandX64 op);
void placeRex(RegisterX64 lhs, OperandX64 rhs);
void placeVex(OperandX64 dst, OperandX64 src1, OperandX64 src2, bool setW, uint8_t mode, uint8_t prefix);
void placeImm8Or32(int32_t imm);
void placeImm8(int32_t imm);
void placeImm32(int32_t imm);
void placeImm64(int64_t imm);
void placeLabel(Label& label);
void place(uint8_t byte);
void commit();
LUAU_NOINLINE void extend();
// Data
size_t allocateData(size_t size, size_t align);
// Logging of assembly in text form (Intel asm with VS disassembly formatting)
LUAU_NOINLINE void log(const char* opcode);
LUAU_NOINLINE void log(const char* opcode, OperandX64 op);
LUAU_NOINLINE void log(const char* opcode, OperandX64 op1, OperandX64 op2);
LUAU_NOINLINE void log(const char* opcode, OperandX64 op1, OperandX64 op2, OperandX64 op3);
LUAU_NOINLINE void log(const char* opcode, OperandX64 op1, OperandX64 op2, OperandX64 op3, OperandX64 op4);
LUAU_NOINLINE void log(Label label);
LUAU_NOINLINE void log(const char* opcode, Label label);
LUAU_NOINLINE void log(const char* opcode, RegisterX64 reg, Label label);
void log(OperandX64 op);
const char* getSizeName(SizeX64 size) const;
const char* getRegisterName(RegisterX64 reg) const;
uint32_t nextLabel = 1;
std::vector<Label> pendingLabels;
std::vector<uint32_t> labelLocations;
DenseHashMap<uint64_t, int32_t> constCache64;
bool finalized = false;
size_t dataPos = 0;
uint8_t* codePos = nullptr;
uint8_t* codeEnd = nullptr;
};
} // namespace X64
} // namespace CodeGen
} // namespace Luau

@ -1,56 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <vector>
#include <stddef.h>
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
constexpr uint32_t kCodeAlignment = 32;
struct CodeAllocator
{
CodeAllocator(size_t blockSize, size_t maxTotalSize);
~CodeAllocator();
// Places data and code into the executable page area
// To allow allocation while previously allocated code is already running, allocation has page granularity
// It's important to group functions together so that page alignment won't result in a lot of wasted space
bool allocate(
const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize, uint8_t*& result, size_t& resultSize, uint8_t*& resultCodeStart);
// Provided to callbacks
void* context = nullptr;
// Called when a new block is created, to create and set up the unwinding information for all the code in the block
// 'startOffset' reserves space for data at the beginning of the page
void* (*createBlockUnwindInfo)(void* context, uint8_t* block, size_t blockSize, size_t& startOffset) = nullptr;
// Called to destroy unwinding information returned by 'createBlockUnwindInfo'
void (*destroyBlockUnwindInfo)(void* context, void* unwindData) = nullptr;
// Unwind information can be placed inside the block with some implementation-specific reservations at the beginning
// But to simplify block space checks, we limit the max size of all that data
static const size_t kMaxReservedDataSize = 256;
bool allocateNewBlock(size_t& unwindInfoSize);
// Current block we use for allocations
uint8_t* blockPos = nullptr;
uint8_t* blockEnd = nullptr;
// All allocated blocks
std::vector<uint8_t*> blocks;
std::vector<void*> unwindInfos;
size_t blockSize = 0;
size_t maxTotalSize = 0;
};
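// A minimal usage sketch (block/total sizes and the surrounding buffers are
// made-up values, not from the original source):
//   CodeAllocator allocator(1 << 20, 1 << 26);
//   uint8_t* nativeData = nullptr;
//   size_t sizeNativeData = 0;
//   uint8_t* codeStart = nullptr;
//   if (allocator.allocate(data, dataSize, code, codeSize, nativeData, sizeNativeData, codeStart))
//       run(codeStart); // hypothetical; 'code' now lives in executable pages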
} // namespace CodeGen
} // namespace Luau

@ -1,19 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stddef.h>
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
// context must be an UnwindBuilder
void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, size_t& startOffset);
void destroyBlockUnwindInfo(void* context, void* unwindData);
bool isUnwindSupported();
} // namespace CodeGen
} // namespace Luau

@ -1,45 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <string>
#include <stdint.h>
struct lua_State;
namespace Luau
{
namespace CodeGen
{
bool isSupported();
void create(lua_State* L);
// Builds target function and all inner functions
void compile(lua_State* L, int idx);
using AnnotatorFn = void (*)(void* context, std::string& result, int fid, int instpos);
struct AssemblyOptions
{
bool outputBinary = false;
bool includeAssembly = false;
bool includeIr = false;
bool includeOutlinedCode = false;
// An optional annotator function can be provided to describe each instruction; it takes a function id and a sequential instruction id
AnnotatorFn annotator = nullptr;
void* annotatorContext = nullptr;
};
// Generates assembly for target function and all inner functions
std::string getAssembly(lua_State* L, int idx, AssemblyOptions options = {});
using PerfLogFn = void (*)(void* context, uintptr_t addr, unsigned size, const char* symbol);
void setPerfLog(void* context, PerfLogFn logFn);
} // namespace CodeGen
} // namespace Luau

@ -1,57 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
namespace A64
{
// See Table C1-1 on page C1-229 of Arm ARM for A-profile architecture
enum class ConditionA64
{
// EQ: integer (equal), floating-point (equal)
Equal,
// NE: integer (not equal), floating-point (not equal or unordered)
NotEqual,
// CS: integer (carry set), unsigned integer (greater than, equal), floating-point (greater than, equal or unordered)
CarrySet,
// CC: integer (carry clear), unsigned integer (less than), floating-point (less than)
CarryClear,
// MI: integer (negative), floating-point (less than)
Minus,
// PL: integer (positive or zero), floating-point (greater than, equal or unordered)
Plus,
// VS: integer (overflow), floating-point (unordered)
Overflow,
// VC: integer (no overflow), floating-point (ordered)
NoOverflow,
// HI: integer (unsigned higher), floating-point (greater than, or unordered)
UnsignedGreater,
// LS: integer (unsigned lower or same), floating-point (less than or equal)
UnsignedLessEqual,
// GE: integer (signed greater than or equal), floating-point (greater than or equal)
GreaterEqual,
// LT: integer (signed less than), floating-point (less than, or unordered)
Less,
// GT: integer (signed greater than), floating-point (greater than)
Greater,
// LE: integer (signed less than or equal), floating-point (less than, equal or unordered)
LessEqual,
// AL: always
Always,
Count
};
} // namespace A64
} // namespace CodeGen
} // namespace Luau

@ -1,47 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
enum class ConditionX64 : uint8_t
{
Overflow,
NoOverflow,
Carry,
NoCarry,
Below,
BelowEqual,
Above,
AboveEqual,
Equal,
Less,
LessEqual,
Greater,
GreaterEqual,
NotBelow,
NotBelowEqual,
NotAbove,
NotAboveEqual,
NotEqual,
NotLess,
NotLessEqual,
NotGreater,
NotGreaterEqual,
Zero,
NotZero,
Parity,
NotParity,
Count
};
} // namespace CodeGen
} // namespace Luau

@ -1,139 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <bitset>
#include <utility>
#include <vector>
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
struct IrBlock;
struct IrFunction;
void updateUseCounts(IrFunction& function);
void updateLastUseLocations(IrFunction& function);
uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t startInstIdx);
// Returns how many values are coming into the block (live in) and how many are coming out of the block (live out)
std::pair<uint32_t, uint32_t> getLiveInOutValueCount(IrFunction& function, IrBlock& block);
uint32_t getLiveInValueCount(IrFunction& function, IrBlock& block);
uint32_t getLiveOutValueCount(IrFunction& function, IrBlock& block);
struct RegisterSet
{
std::bitset<256> regs;
// If variadic sequence is active, we track register from which it starts
bool varargSeq = false;
uint8_t varargStart = 0;
};
void requireVariadicSequence(RegisterSet& sourceRs, const RegisterSet& defRs, uint8_t varargStart);
struct BlockOrdering
{
uint32_t depth = 0;
uint32_t preOrder = ~0u;
uint32_t postOrder = ~0u;
bool visited = false;
};
struct CfgInfo
{
std::vector<uint32_t> predecessors;
std::vector<uint32_t> predecessorsOffsets;
std::vector<uint32_t> successors;
std::vector<uint32_t> successorsOffsets;
// Immediate dominators (unique parent in the dominator tree)
std::vector<uint32_t> idoms;
// Children in the dominator tree
std::vector<uint32_t> domChildren;
std::vector<uint32_t> domChildrenOffsets;
std::vector<BlockOrdering> domOrdering;
// VM registers that are live when the block is entered
// Additionally, an active variadic sequence can exist at the entry of the block
std::vector<RegisterSet> in;
// VM registers that are defined inside the block
// It can also contain a variadic sequence definition if that hasn't been consumed inside the block
// Note that this means that checking the 'def' set might not be enough to say that a register has not been written to
std::vector<RegisterSet> def;
// VM registers that are coming out from the block
// These might be registers that are defined inside the block or have been defined at the entry of the block
// Additionally, an active variadic sequence can exist at the exit of the block
std::vector<RegisterSet> out;
// VM registers captured by nested closures
// This set can never have an active variadic sequence
RegisterSet captured;
};
// A quick refresher on dominance and dominator trees:
// * If A is a dominator of B (A dom B), you can never execute B without executing A first
// * A is a strict dominator of B (A sdom B) is similar to previous one but A != B
// * Immediate dominator node N (idom N) is a unique node T so that T sdom N,
// but T does not strictly dominate any other node that dominates N.
// * Dominance frontier is a set of nodes where dominance of a node X ends.
// In practice this is where values established by node X might no longer hold because of join edges from other nodes coming in.
// This is also where PHI instructions in SSA are placed.
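// A small worked example (illustrative): in the diamond CFG A->B, A->C, B->D, C->D,
// A dominates every node and idom(B) = idom(C) = idom(D) = A; D's immediate
// dominator is A rather than B or C because D is reachable through either branch.
// The dominance frontier of both B and C is {D}, which is where SSA would place a PHI.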
void computeCfgImmediateDominators(IrFunction& function);
void computeCfgDominanceTreeChildren(IrFunction& function);
// Function used to update all CFG data
void computeCfgInfo(IrFunction& function);
struct BlockIteratorWrapper
{
const uint32_t* itBegin = nullptr;
const uint32_t* itEnd = nullptr;
bool empty() const
{
return itBegin == itEnd;
}
size_t size() const
{
return size_t(itEnd - itBegin);
}
const uint32_t* begin() const
{
return itBegin;
}
const uint32_t* end() const
{
return itEnd;
}
uint32_t operator[](size_t pos) const
{
LUAU_ASSERT(pos < size_t(itEnd - itBegin));
return itBegin[pos];
}
};
BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx);
BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx);
BlockIteratorWrapper domChildren(const CfgInfo& cfg, uint32_t blockIdx);
} // namespace CodeGen
} // namespace Luau

@ -1,116 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
#include "Luau/Common.h"
#include "Luau/DenseHash.h"
#include "Luau/IrData.h"
#include <vector>
struct Proto;
typedef uint32_t Instruction;
namespace Luau
{
namespace CodeGen
{
struct AssemblyOptions;
struct IrBuilder
{
IrBuilder();
void buildFunctionIr(Proto* proto);
void rebuildBytecodeBasicBlocks(Proto* proto);
void translateInst(LuauOpcode op, const Instruction* pc, int i);
bool isInternalBlock(IrOp block);
void beginBlock(IrOp block);
void loadAndCheckTag(IrOp loc, uint8_t tag, IrOp fallback);
// Clones all instructions into the current block
// Source block that is cloned cannot use values coming in from a predecessor
void clone(const IrBlock& source, bool removeCurrentTerminator);
IrOp undef();
IrOp constInt(int value);
IrOp constUint(unsigned value);
IrOp constDouble(double value);
IrOp constTag(uint8_t value);
IrOp constAny(IrConst constant, uint64_t asCommonKey);
IrOp cond(IrCondition cond);
IrOp inst(IrCmd cmd);
IrOp inst(IrCmd cmd, IrOp a);
IrOp inst(IrCmd cmd, IrOp a, IrOp b);
IrOp inst(IrCmd cmd, IrOp a, IrOp b, IrOp c);
IrOp inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d);
IrOp inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e);
IrOp inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e, IrOp f);
IrOp block(IrBlockKind kind); // Requested kind can be ignored if we are in an outlined sequence
IrOp blockAtInst(uint32_t index);
IrOp vmReg(uint8_t index);
IrOp vmConst(uint32_t index);
IrOp vmUpvalue(uint8_t index);
bool inTerminatedBlock = false;
bool activeFastcallFallback = false;
IrOp fastcallFallbackReturn;
IrFunction function;
uint32_t activeBlockIdx = ~0u;
std::vector<uint32_t> instIndexToBlock; // Block index at the bytecode instruction
// Similar to BytecodeBuilder, duplicate constants are removed using the same method
struct ConstantKey
{
IrConstKind kind;
// Note: this stores value* from IrConst; when kind is Double, this stores the same bits as double does but in uint64_t.
uint64_t value;
bool operator==(const ConstantKey& key) const
{
return kind == key.kind && value == key.value;
}
};
struct ConstantKeyHash
{
size_t operator()(const ConstantKey& key) const
{
// finalizer from MurmurHash64B
const uint32_t m = 0x5bd1e995;
uint32_t h1 = uint32_t(key.value);
uint32_t h2 = uint32_t(key.value >> 32) ^ (int(key.kind) * m);
h1 ^= h2 >> 18;
h1 *= m;
h2 ^= h1 >> 22;
h2 *= m;
h1 ^= h2 >> 17;
h1 *= m;
h2 ^= h1 >> 19;
h2 *= m;
// ... truncated to 32-bit output (normally hash is equal to (uint64_t(h1) << 32) | h2, but we only really need the lower 32-bit half)
return size_t(h2);
}
};
DenseHashMap<ConstantKey, uint32_t, ConstantKeyHash> constantMap;
};
} // namespace CodeGen
} // namespace Luau

@ -1,84 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/IrData.h"
#include "Luau/OperandX64.h"
#include "Luau/RegisterX64.h"
#include <array>
// TODO: call wrapper can be used to suggest target registers for ScopedRegX64 to compute data into argument registers directly
namespace Luau
{
namespace CodeGen
{
namespace X64
{
struct IrRegAllocX64;
struct ScopedRegX64;
struct CallArgument
{
SizeX64 targetSize = SizeX64::none;
OperandX64 source = noreg;
IrOp sourceOp;
OperandX64 target = noreg;
bool candidate = true;
};
class IrCallWrapperX64
{
public:
IrCallWrapperX64(IrRegAllocX64& regs, AssemblyBuilderX64& build, uint32_t instIdx = kInvalidInstIdx);
void addArgument(SizeX64 targetSize, OperandX64 source, IrOp sourceOp = {});
void addArgument(SizeX64 targetSize, ScopedRegX64& scopedReg);
void call(const OperandX64& func);
RegisterX64 suggestNextArgumentRegister(SizeX64 size) const;
IrRegAllocX64& regs;
AssemblyBuilderX64& build;
uint32_t instIdx = ~0u;
private:
OperandX64 getNextArgumentTarget(SizeX64 size) const;
void countRegisterUses();
CallArgument* findNonInterferingArgument();
bool interferesWithOperand(const OperandX64& op, RegisterX64 reg) const;
bool interferesWithActiveSources(const CallArgument& targetArg, int targetArgIndex) const;
bool interferesWithActiveTarget(RegisterX64 sourceReg) const;
void moveToTarget(CallArgument& arg);
void freeSourceRegisters(CallArgument& arg);
void renameRegister(RegisterX64& target, RegisterX64 reg, RegisterX64 replacement);
void renameSourceRegisters(RegisterX64 reg, RegisterX64 replacement);
RegisterX64 findConflictingTarget() const;
void renameConflictingRegister(RegisterX64 conflict);
int getRegisterUses(RegisterX64 reg) const;
void addRegisterUse(RegisterX64 reg);
void removeRegisterUse(RegisterX64 reg);
static const int kMaxCallArguments = 6;
std::array<CallArgument, kMaxCallArguments> args;
int argCount = 0;
int gprPos = 0;
int xmmPos = 0;
OperandX64 funcOp;
// Internal counters for remaining register use counts
std::array<uint8_t, 16> gprUses;
std::array<uint8_t, 16> xmmUses;
};
} // namespace X64
} // namespace CodeGen
} // namespace Luau

File diff suppressed because it is too large

@ -1,47 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/IrData.h"
#include <string>
#include <vector>
namespace Luau
{
namespace CodeGen
{
struct CfgInfo;
const char* getCmdName(IrCmd cmd);
const char* getBlockKindName(IrBlockKind kind);
struct IrToStringContext
{
std::string& result;
const std::vector<IrBlock>& blocks;
const std::vector<IrConst>& constants;
const CfgInfo& cfg;
};
void toString(IrToStringContext& ctx, const IrInst& inst, uint32_t index);
void toString(IrToStringContext& ctx, const IrBlock& block, uint32_t index); // Block title
void toString(IrToStringContext& ctx, IrOp op);
void toString(std::string& result, IrConst constant);
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, const IrInst& inst, uint32_t instIdx, bool includeUseInfo);
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t index, bool includeUseInfo); // Block title
std::string toString(const IrFunction& function, bool includeUseInfo);
std::string dump(const IrFunction& function);
std::string toDot(const IrFunction& function, bool includeInst);
std::string toDotCfg(const IrFunction& function);
std::string toDotDjGraph(const IrFunction& function);
std::string dumpDot(const IrFunction& function, bool includeInst);
} // namespace CodeGen
} // namespace Luau

@ -1,121 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/IrData.h"
#include "Luau/RegisterX64.h"
#include <array>
#include <initializer_list>
namespace Luau
{
namespace CodeGen
{
namespace X64
{
constexpr uint8_t kNoStackSlot = 0xff;
struct IrSpillX64
{
uint32_t instIdx = 0;
IrValueKind valueKind = IrValueKind::Unknown;
unsigned spillId = 0;
// Spill location can be a stack location or be empty
// When it's empty, it means that instruction value can be rematerialized
uint8_t stackSlot = kNoStackSlot;
RegisterX64 originalLoc = noreg;
};
struct IrRegAllocX64
{
IrRegAllocX64(AssemblyBuilderX64& build, IrFunction& function);
RegisterX64 allocReg(SizeX64 size, uint32_t instIdx);
RegisterX64 allocRegOrReuse(SizeX64 size, uint32_t instIdx, std::initializer_list<IrOp> oprefs);
RegisterX64 takeReg(RegisterX64 reg, uint32_t instIdx);
void freeReg(RegisterX64 reg);
void freeLastUseReg(IrInst& target, uint32_t instIdx);
void freeLastUseRegs(const IrInst& inst, uint32_t instIdx);
bool isLastUseReg(const IrInst& target, uint32_t instIdx) const;
bool shouldFreeGpr(RegisterX64 reg) const;
unsigned findSpillStackSlot(IrValueKind valueKind);
IrOp getRestoreOp(const IrInst& inst) const;
bool hasRestoreOp(const IrInst& inst) const;
OperandX64 getRestoreAddress(const IrInst& inst, IrOp restoreOp);
// Register used by instruction is about to be freed, have to find a way to restore value later
void preserve(IrInst& inst);
void restore(IrInst& inst, bool intoOriginalLocation);
void preserveAndFreeInstValues();
uint32_t findInstructionWithFurthestNextUse(const std::array<uint32_t, 16>& regInstUsers) const;
void assertFree(RegisterX64 reg) const;
void assertAllFree() const;
void assertNoSpills() const;
AssemblyBuilderX64& build;
IrFunction& function;
uint32_t currInstIdx = ~0u;
std::array<bool, 16> freeGprMap;
std::array<uint32_t, 16> gprInstUsers;
std::array<bool, 16> freeXmmMap;
std::array<uint32_t, 16> xmmInstUsers;
std::bitset<256> usedSpillSlots;
unsigned maxUsedSlot = 0;
unsigned nextSpillId = 1;
std::vector<IrSpillX64> spills;
};
struct ScopedRegX64
{
explicit ScopedRegX64(IrRegAllocX64& owner);
ScopedRegX64(IrRegAllocX64& owner, SizeX64 size);
ScopedRegX64(IrRegAllocX64& owner, RegisterX64 reg);
~ScopedRegX64();
ScopedRegX64(const ScopedRegX64&) = delete;
ScopedRegX64& operator=(const ScopedRegX64&) = delete;
void alloc(SizeX64 size);
void free();
RegisterX64 release();
IrRegAllocX64& owner;
RegisterX64 reg;
};
// When IR instruction makes a call under a condition that's not reflected as a real branch in IR,
// spilled values have to be restored to their exact original locations, so that both after a call
// and after the skip, values are found in the same place
struct ScopedSpills
{
explicit ScopedSpills(IrRegAllocX64& owner);
~ScopedSpills();
ScopedSpills(const ScopedSpills&) = delete;
ScopedSpills& operator=(const ScopedSpills&) = delete;
IrRegAllocX64& owner;
unsigned startSpillId = 0;
};
} // namespace X64
} // namespace CodeGen
} // namespace Luau

@ -1,258 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
#include "Luau/Common.h"
#include "Luau/IrData.h"
namespace Luau
{
namespace CodeGen
{
struct IrBuilder;
inline bool isJumpD(LuauOpcode op)
{
switch (op)
{
case LOP_JUMP:
case LOP_JUMPIF:
case LOP_JUMPIFNOT:
case LOP_JUMPIFEQ:
case LOP_JUMPIFLE:
case LOP_JUMPIFLT:
case LOP_JUMPIFNOTEQ:
case LOP_JUMPIFNOTLE:
case LOP_JUMPIFNOTLT:
case LOP_FORNPREP:
case LOP_FORNLOOP:
case LOP_FORGPREP:
case LOP_FORGLOOP:
case LOP_FORGPREP_INEXT:
case LOP_FORGPREP_NEXT:
case LOP_JUMPBACK:
case LOP_JUMPXEQKNIL:
case LOP_JUMPXEQKB:
case LOP_JUMPXEQKN:
case LOP_JUMPXEQKS:
return true;
default:
return false;
}
}
inline bool isSkipC(LuauOpcode op)
{
switch (op)
{
case LOP_LOADB:
return true;
default:
return false;
}
}
inline bool isFastCall(LuauOpcode op)
{
switch (op)
{
case LOP_FASTCALL:
case LOP_FASTCALL1:
case LOP_FASTCALL2:
case LOP_FASTCALL2K:
return true;
default:
return false;
}
}
inline int getJumpTarget(uint32_t insn, uint32_t pc)
{
LuauOpcode op = LuauOpcode(LUAU_INSN_OP(insn));
if (isJumpD(op))
return int(pc + LUAU_INSN_D(insn) + 1);
else if (isFastCall(op))
return int(pc + LUAU_INSN_C(insn) + 2);
else if (isSkipC(op) && LUAU_INSN_C(insn))
return int(pc + LUAU_INSN_C(insn) + 1);
else if (op == LOP_JUMPX)
return int(pc + LUAU_INSN_E(insn) + 1);
else
return -1;
}
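// Example (illustrative): LOP_JUMPBACK at pc 10 with D = -3 targets 10 + (-3) + 1 = 8,
// while opcodes with no branch component return -1.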
inline bool isBlockTerminator(IrCmd cmd)
{
switch (cmd)
{
case IrCmd::JUMP:
case IrCmd::JUMP_IF_TRUTHY:
case IrCmd::JUMP_IF_FALSY:
case IrCmd::JUMP_EQ_TAG:
case IrCmd::JUMP_EQ_INT:
case IrCmd::JUMP_LT_INT:
case IrCmd::JUMP_GE_UINT:
case IrCmd::JUMP_EQ_POINTER:
case IrCmd::JUMP_CMP_NUM:
case IrCmd::JUMP_CMP_ANY:
case IrCmd::JUMP_SLOT_MATCH:
case IrCmd::RETURN:
case IrCmd::FORGLOOP:
case IrCmd::FORGLOOP_FALLBACK:
case IrCmd::FORGPREP_XNEXT_FALLBACK:
case IrCmd::FALLBACK_FORGPREP:
return true;
default:
break;
}
return false;
}
inline bool isNonTerminatingJump(IrCmd cmd)
{
switch (cmd)
{
case IrCmd::TRY_NUM_TO_INDEX:
case IrCmd::TRY_CALL_FASTGETTM:
case IrCmd::CHECK_FASTCALL_RES:
case IrCmd::CHECK_TAG:
case IrCmd::CHECK_READONLY:
case IrCmd::CHECK_NO_METATABLE:
case IrCmd::CHECK_SAFE_ENV:
case IrCmd::CHECK_ARRAY_SIZE:
case IrCmd::CHECK_SLOT_MATCH:
case IrCmd::CHECK_NODE_NO_NEXT:
return true;
default:
break;
}
return false;
}
inline bool hasResult(IrCmd cmd)
{
switch (cmd)
{
case IrCmd::LOAD_TAG:
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
case IrCmd::LOAD_NODE_VALUE_TV:
case IrCmd::LOAD_ENV:
case IrCmd::GET_ARR_ADDR:
case IrCmd::GET_SLOT_NODE_ADDR:
case IrCmd::GET_HASH_NODE_ADDR:
case IrCmd::ADD_INT:
case IrCmd::SUB_INT:
case IrCmd::ADD_NUM:
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:
case IrCmd::UNM_NUM:
case IrCmd::FLOOR_NUM:
case IrCmd::CEIL_NUM:
case IrCmd::ROUND_NUM:
case IrCmd::SQRT_NUM:
case IrCmd::ABS_NUM:
case IrCmd::NOT_ANY:
case IrCmd::TABLE_LEN:
case IrCmd::NEW_TABLE:
case IrCmd::DUP_TABLE:
case IrCmd::TRY_NUM_TO_INDEX:
case IrCmd::TRY_CALL_FASTGETTM:
case IrCmd::INT_TO_NUM:
case IrCmd::UINT_TO_NUM:
case IrCmd::NUM_TO_INT:
case IrCmd::NUM_TO_UINT:
case IrCmd::SUBSTITUTE:
case IrCmd::INVOKE_FASTCALL:
case IrCmd::BITAND_UINT:
case IrCmd::BITXOR_UINT:
case IrCmd::BITOR_UINT:
case IrCmd::BITNOT_UINT:
case IrCmd::BITLSHIFT_UINT:
case IrCmd::BITRSHIFT_UINT:
case IrCmd::BITARSHIFT_UINT:
case IrCmd::BITLROTATE_UINT:
case IrCmd::BITRROTATE_UINT:
case IrCmd::BITCOUNTLZ_UINT:
case IrCmd::BITCOUNTRZ_UINT:
case IrCmd::INVOKE_LIBM:
return true;
default:
break;
}
return false;
}
inline bool hasSideEffects(IrCmd cmd)
{
if (cmd == IrCmd::INVOKE_FASTCALL)
return true;
// Instructions that don't produce a result most likely have other side-effects to make them useful
// Right now, a full switch would mirror the 'hasResult' function, so we use this simple condition
return !hasResult(cmd);
}
inline bool isPseudo(IrCmd cmd)
{
// Instructions that are used for internal needs and are not a part of final lowering
return cmd == IrCmd::NOP || cmd == IrCmd::SUBSTITUTE;
}
IrValueKind getCmdValueKind(IrCmd cmd);
bool isGCO(uint8_t tag);
// Manually add or remove use of an operand
void addUse(IrFunction& function, IrOp op);
void removeUse(IrFunction& function, IrOp op);
// Remove a single instruction
void kill(IrFunction& function, IrInst& inst);
// Remove a range of instructions
void kill(IrFunction& function, uint32_t start, uint32_t end);
// Remove a block, including all instructions inside
void kill(IrFunction& function, IrBlock& block);
// Replace a single operand and update use counts (can cause chain removal of dead code)
void replace(IrFunction& function, IrOp& original, IrOp replacement);
// Replace a single instruction
// Target instruction index instead of reference is used to handle introduction of a new block terminator
void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst replacement);
// Replace instruction with a different value (using IrCmd::SUBSTITUTE)
void substitute(IrFunction& function, IrInst& inst, IrOp replacement);
// Replace instruction arguments that point to substitutions with target values
void applySubstitutions(IrFunction& function, IrOp& op);
void applySubstitutions(IrFunction& function, IrInst& inst);
// Compare numbers using IR condition value
bool compare(double a, double b, IrCondition cond);
// Perform constant folding on instruction at index
// For most instructions, successful folding results in a IrCmd::SUBSTITUTE
// But it can also be successful on conditional control-flow, replacing it with an unconditional IrCmd::JUMP
void foldConstants(IrBuilder& build, IrFunction& function, IrBlock& block, uint32_t instIdx);
uint32_t getNativeContextOffset(int bfid);
} // namespace CodeGen
} // namespace Luau

@ -1,18 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
struct Label
{
uint32_t id = 0;
uint32_t location = ~0u;
};
} // namespace CodeGen
} // namespace Luau

@ -1,145 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include "Luau/RegisterX64.h"
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
namespace X64
{
enum class CategoryX64 : uint8_t
{
reg,
mem,
imm,
};
struct OperandX64
{
constexpr OperandX64(RegisterX64 reg)
: cat(CategoryX64::reg)
, index(noreg)
, base(reg)
, memSize(SizeX64::none)
, scale(1)
, imm(0)
{
}
constexpr OperandX64(int32_t imm)
: cat(CategoryX64::imm)
, index(noreg)
, base(noreg)
, memSize(SizeX64::none)
, scale(1)
, imm(imm)
{
}
constexpr explicit OperandX64(SizeX64 size, RegisterX64 index, uint8_t scale, RegisterX64 base, int32_t disp)
: cat(CategoryX64::mem)
, index(index)
, base(base)
, memSize(size)
, scale(scale)
, imm(disp)
{
}
// Fields are carefully placed to make this struct fit into an 8 byte register
CategoryX64 cat;
RegisterX64 index;
RegisterX64 base;
SizeX64 memSize : 4;
uint8_t scale : 4;
int32_t imm;
constexpr OperandX64 operator[](OperandX64&& addr) const
{
LUAU_ASSERT(cat == CategoryX64::mem);
LUAU_ASSERT(index == noreg && scale == 1 && base == noreg && imm == 0);
LUAU_ASSERT(addr.memSize == SizeX64::none);
addr.cat = CategoryX64::mem;
addr.memSize = memSize;
return addr;
}
};
constexpr OperandX64 addr{SizeX64::none, noreg, 1, noreg, 0};
constexpr OperandX64 byte{SizeX64::byte, noreg, 1, noreg, 0};
constexpr OperandX64 word{SizeX64::word, noreg, 1, noreg, 0};
constexpr OperandX64 dword{SizeX64::dword, noreg, 1, noreg, 0};
constexpr OperandX64 qword{SizeX64::qword, noreg, 1, noreg, 0};
constexpr OperandX64 xmmword{SizeX64::xmmword, noreg, 1, noreg, 0};
constexpr OperandX64 ymmword{SizeX64::ymmword, noreg, 1, noreg, 0};
constexpr OperandX64 operator*(RegisterX64 reg, uint8_t scale)
{
if (scale == 1)
return OperandX64(reg);
LUAU_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8);
LUAU_ASSERT(reg.index != 0b100 && "can't scale SP");
return OperandX64(SizeX64::none, reg, scale, noreg, 0);
}
constexpr OperandX64 operator+(RegisterX64 reg, int32_t disp)
{
return OperandX64(SizeX64::none, noreg, 1, reg, disp);
}
constexpr OperandX64 operator-(RegisterX64 reg, int32_t disp)
{
return OperandX64(SizeX64::none, noreg, 1, reg, -disp);
}
constexpr OperandX64 operator+(RegisterX64 base, RegisterX64 index)
{
LUAU_ASSERT(index.index != 4 && "sp cannot be used as index");
LUAU_ASSERT(base.size == index.size);
return OperandX64(SizeX64::none, index, 1, base, 0);
}
constexpr OperandX64 operator+(OperandX64 op, int32_t disp)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
LUAU_ASSERT(op.memSize == SizeX64::none);
op.imm += disp;
return op;
}
constexpr OperandX64 operator+(OperandX64 op, RegisterX64 base)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
LUAU_ASSERT(op.memSize == SizeX64::none);
LUAU_ASSERT(op.base == noreg);
LUAU_ASSERT(op.index == noreg || op.index.size == base.size);
op.base = base;
return op;
}
constexpr OperandX64 operator+(RegisterX64 base, OperandX64 op)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
LUAU_ASSERT(op.memSize == SizeX64::none);
LUAU_ASSERT(op.base == noreg);
LUAU_ASSERT(op.index == noreg || op.index.size == base.size);
op.base = base;
return op;
}
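// Taken together, these overloads let addressing modes be written much like
// assembly source. A sketch (rax/rbx are RegisterX64 constants defined elsewhere):
//   OperandX64 slot = qword[rax + rbx * 8 + 16]; // base + scaled index + displacement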
} // namespace X64
} // namespace CodeGen
} // namespace Luau

@ -1,17 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/IrData.h"
namespace Luau
{
namespace CodeGen
{
struct IrBuilder;
void constPropInBlockChains(IrBuilder& build, bool useValueNumbering);
void createLinearBlocks(IrBuilder& build, bool useValueNumbering);
} // namespace CodeGen
} // namespace Luau

@ -1,14 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/IrData.h"
namespace Luau
{
namespace CodeGen
{
void optimizeMemoryOperandsX64(IrFunction& function);
} // namespace CodeGen
} // namespace Luau

@ -1,233 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
namespace A64
{
enum class KindA64 : uint8_t
{
none,
w, // 32-bit GPR
x, // 64-bit GPR
s, // 32-bit SIMD&FP scalar
d, // 64-bit SIMD&FP scalar
q, // 128-bit SIMD&FP vector
};
struct RegisterA64
{
KindA64 kind : 3;
uint8_t index : 5;
constexpr bool operator==(RegisterA64 rhs) const
{
return kind == rhs.kind && index == rhs.index;
}
constexpr bool operator!=(RegisterA64 rhs) const
{
return !(*this == rhs);
}
};
constexpr RegisterA64 castReg(KindA64 kind, RegisterA64 reg)
{
LUAU_ASSERT(kind != reg.kind);
LUAU_ASSERT(kind != KindA64::none && reg.kind != KindA64::none);
LUAU_ASSERT((kind == KindA64::w || kind == KindA64::x) == (reg.kind == KindA64::w || reg.kind == KindA64::x));
return RegisterA64{kind, reg.index};
}
// This is equivalent to castReg(KindA64::x), but is separate because it implies different semantics
// Specifically, there are cases when it's useful to treat a wN register as an xN register *after* it has been assigned a value
// Since all A64 instructions that write to wN implicitly zero the top half, this works when we need zero extension semantics
// Crucially, this is *not* safe on an ABI boundary - an int parameter in wN register may have anything in its top half in certain cases
// However, as long as our codegen doesn't use 32-bit truncation by using castReg x=>w, we can safely rely on this.
constexpr RegisterA64 zextReg(RegisterA64 reg)
{
LUAU_ASSERT(reg.kind == KindA64::w);
return RegisterA64{KindA64::x, reg.index};
}
constexpr RegisterA64 noreg{KindA64::none, 0};
constexpr RegisterA64 w0{KindA64::w, 0};
constexpr RegisterA64 w1{KindA64::w, 1};
constexpr RegisterA64 w2{KindA64::w, 2};
constexpr RegisterA64 w3{KindA64::w, 3};
constexpr RegisterA64 w4{KindA64::w, 4};
constexpr RegisterA64 w5{KindA64::w, 5};
constexpr RegisterA64 w6{KindA64::w, 6};
constexpr RegisterA64 w7{KindA64::w, 7};
constexpr RegisterA64 w8{KindA64::w, 8};
constexpr RegisterA64 w9{KindA64::w, 9};
constexpr RegisterA64 w10{KindA64::w, 10};
constexpr RegisterA64 w11{KindA64::w, 11};
constexpr RegisterA64 w12{KindA64::w, 12};
constexpr RegisterA64 w13{KindA64::w, 13};
constexpr RegisterA64 w14{KindA64::w, 14};
constexpr RegisterA64 w15{KindA64::w, 15};
constexpr RegisterA64 w16{KindA64::w, 16};
constexpr RegisterA64 w17{KindA64::w, 17};
constexpr RegisterA64 w18{KindA64::w, 18};
constexpr RegisterA64 w19{KindA64::w, 19};
constexpr RegisterA64 w20{KindA64::w, 20};
constexpr RegisterA64 w21{KindA64::w, 21};
constexpr RegisterA64 w22{KindA64::w, 22};
constexpr RegisterA64 w23{KindA64::w, 23};
constexpr RegisterA64 w24{KindA64::w, 24};
constexpr RegisterA64 w25{KindA64::w, 25};
constexpr RegisterA64 w26{KindA64::w, 26};
constexpr RegisterA64 w27{KindA64::w, 27};
constexpr RegisterA64 w28{KindA64::w, 28};
constexpr RegisterA64 w29{KindA64::w, 29};
constexpr RegisterA64 w30{KindA64::w, 30};
constexpr RegisterA64 wzr{KindA64::w, 31};
constexpr RegisterA64 x0{KindA64::x, 0};
constexpr RegisterA64 x1{KindA64::x, 1};
constexpr RegisterA64 x2{KindA64::x, 2};
constexpr RegisterA64 x3{KindA64::x, 3};
constexpr RegisterA64 x4{KindA64::x, 4};
constexpr RegisterA64 x5{KindA64::x, 5};
constexpr RegisterA64 x6{KindA64::x, 6};
constexpr RegisterA64 x7{KindA64::x, 7};
constexpr RegisterA64 x8{KindA64::x, 8};
constexpr RegisterA64 x9{KindA64::x, 9};
constexpr RegisterA64 x10{KindA64::x, 10};
constexpr RegisterA64 x11{KindA64::x, 11};
constexpr RegisterA64 x12{KindA64::x, 12};
constexpr RegisterA64 x13{KindA64::x, 13};
constexpr RegisterA64 x14{KindA64::x, 14};
constexpr RegisterA64 x15{KindA64::x, 15};
constexpr RegisterA64 x16{KindA64::x, 16};
constexpr RegisterA64 x17{KindA64::x, 17};
constexpr RegisterA64 x18{KindA64::x, 18};
constexpr RegisterA64 x19{KindA64::x, 19};
constexpr RegisterA64 x20{KindA64::x, 20};
constexpr RegisterA64 x21{KindA64::x, 21};
constexpr RegisterA64 x22{KindA64::x, 22};
constexpr RegisterA64 x23{KindA64::x, 23};
constexpr RegisterA64 x24{KindA64::x, 24};
constexpr RegisterA64 x25{KindA64::x, 25};
constexpr RegisterA64 x26{KindA64::x, 26};
constexpr RegisterA64 x27{KindA64::x, 27};
constexpr RegisterA64 x28{KindA64::x, 28};
constexpr RegisterA64 x29{KindA64::x, 29};
constexpr RegisterA64 x30{KindA64::x, 30};
constexpr RegisterA64 xzr{KindA64::x, 31};
constexpr RegisterA64 sp{KindA64::none, 31};
constexpr RegisterA64 s0{KindA64::s, 0};
constexpr RegisterA64 s1{KindA64::s, 1};
constexpr RegisterA64 s2{KindA64::s, 2};
constexpr RegisterA64 s3{KindA64::s, 3};
constexpr RegisterA64 s4{KindA64::s, 4};
constexpr RegisterA64 s5{KindA64::s, 5};
constexpr RegisterA64 s6{KindA64::s, 6};
constexpr RegisterA64 s7{KindA64::s, 7};
constexpr RegisterA64 s8{KindA64::s, 8};
constexpr RegisterA64 s9{KindA64::s, 9};
constexpr RegisterA64 s10{KindA64::s, 10};
constexpr RegisterA64 s11{KindA64::s, 11};
constexpr RegisterA64 s12{KindA64::s, 12};
constexpr RegisterA64 s13{KindA64::s, 13};
constexpr RegisterA64 s14{KindA64::s, 14};
constexpr RegisterA64 s15{KindA64::s, 15};
constexpr RegisterA64 s16{KindA64::s, 16};
constexpr RegisterA64 s17{KindA64::s, 17};
constexpr RegisterA64 s18{KindA64::s, 18};
constexpr RegisterA64 s19{KindA64::s, 19};
constexpr RegisterA64 s20{KindA64::s, 20};
constexpr RegisterA64 s21{KindA64::s, 21};
constexpr RegisterA64 s22{KindA64::s, 22};
constexpr RegisterA64 s23{KindA64::s, 23};
constexpr RegisterA64 s24{KindA64::s, 24};
constexpr RegisterA64 s25{KindA64::s, 25};
constexpr RegisterA64 s26{KindA64::s, 26};
constexpr RegisterA64 s27{KindA64::s, 27};
constexpr RegisterA64 s28{KindA64::s, 28};
constexpr RegisterA64 s29{KindA64::s, 29};
constexpr RegisterA64 s30{KindA64::s, 30};
constexpr RegisterA64 s31{KindA64::s, 31};
constexpr RegisterA64 d0{KindA64::d, 0};
constexpr RegisterA64 d1{KindA64::d, 1};
constexpr RegisterA64 d2{KindA64::d, 2};
constexpr RegisterA64 d3{KindA64::d, 3};
constexpr RegisterA64 d4{KindA64::d, 4};
constexpr RegisterA64 d5{KindA64::d, 5};
constexpr RegisterA64 d6{KindA64::d, 6};
constexpr RegisterA64 d7{KindA64::d, 7};
constexpr RegisterA64 d8{KindA64::d, 8};
constexpr RegisterA64 d9{KindA64::d, 9};
constexpr RegisterA64 d10{KindA64::d, 10};
constexpr RegisterA64 d11{KindA64::d, 11};
constexpr RegisterA64 d12{KindA64::d, 12};
constexpr RegisterA64 d13{KindA64::d, 13};
constexpr RegisterA64 d14{KindA64::d, 14};
constexpr RegisterA64 d15{KindA64::d, 15};
constexpr RegisterA64 d16{KindA64::d, 16};
constexpr RegisterA64 d17{KindA64::d, 17};
constexpr RegisterA64 d18{KindA64::d, 18};
constexpr RegisterA64 d19{KindA64::d, 19};
constexpr RegisterA64 d20{KindA64::d, 20};
constexpr RegisterA64 d21{KindA64::d, 21};
constexpr RegisterA64 d22{KindA64::d, 22};
constexpr RegisterA64 d23{KindA64::d, 23};
constexpr RegisterA64 d24{KindA64::d, 24};
constexpr RegisterA64 d25{KindA64::d, 25};
constexpr RegisterA64 d26{KindA64::d, 26};
constexpr RegisterA64 d27{KindA64::d, 27};
constexpr RegisterA64 d28{KindA64::d, 28};
constexpr RegisterA64 d29{KindA64::d, 29};
constexpr RegisterA64 d30{KindA64::d, 30};
constexpr RegisterA64 d31{KindA64::d, 31};
constexpr RegisterA64 q0{KindA64::q, 0};
constexpr RegisterA64 q1{KindA64::q, 1};
constexpr RegisterA64 q2{KindA64::q, 2};
constexpr RegisterA64 q3{KindA64::q, 3};
constexpr RegisterA64 q4{KindA64::q, 4};
constexpr RegisterA64 q5{KindA64::q, 5};
constexpr RegisterA64 q6{KindA64::q, 6};
constexpr RegisterA64 q7{KindA64::q, 7};
constexpr RegisterA64 q8{KindA64::q, 8};
constexpr RegisterA64 q9{KindA64::q, 9};
constexpr RegisterA64 q10{KindA64::q, 10};
constexpr RegisterA64 q11{KindA64::q, 11};
constexpr RegisterA64 q12{KindA64::q, 12};
constexpr RegisterA64 q13{KindA64::q, 13};
constexpr RegisterA64 q14{KindA64::q, 14};
constexpr RegisterA64 q15{KindA64::q, 15};
constexpr RegisterA64 q16{KindA64::q, 16};
constexpr RegisterA64 q17{KindA64::q, 17};
constexpr RegisterA64 q18{KindA64::q, 18};
constexpr RegisterA64 q19{KindA64::q, 19};
constexpr RegisterA64 q20{KindA64::q, 20};
constexpr RegisterA64 q21{KindA64::q, 21};
constexpr RegisterA64 q22{KindA64::q, 22};
constexpr RegisterA64 q23{KindA64::q, 23};
constexpr RegisterA64 q24{KindA64::q, 24};
constexpr RegisterA64 q25{KindA64::q, 25};
constexpr RegisterA64 q26{KindA64::q, 26};
constexpr RegisterA64 q27{KindA64::q, 27};
constexpr RegisterA64 q28{KindA64::q, 28};
constexpr RegisterA64 q29{KindA64::q, 29};
constexpr RegisterA64 q30{KindA64::q, 30};
constexpr RegisterA64 q31{KindA64::q, 31};
} // namespace A64
} // namespace CodeGen
} // namespace Luau
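
A small sketch (assuming only this header, with LUAU_ASSERT coming from Luau/Common.h) that makes the kind casts above concrete:

#include "Luau/RegisterA64.h"

void registerKindExamples()
{
    using namespace Luau::CodeGen::A64;

    // w5 and x5 name the same architectural register at different widths
    LUAU_ASSERT(castReg(KindA64::x, w5) == x5);
    // zextReg spells the same w->x widening while documenting that the top half is zero
    LUAU_ASSERT(zextReg(w5) == x5);
    // SIMD&FP kinds cast among themselves the same way
    LUAU_ASSERT(castReg(KindA64::q, d7) == q7);
}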

View File

@ -1,152 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
namespace X64
{
enum class SizeX64 : uint8_t
{
none,
byte,
word,
dword,
qword,
xmmword,
ymmword,
};
struct RegisterX64
{
SizeX64 size : 3;
uint8_t index : 5;
constexpr bool operator==(RegisterX64 rhs) const
{
return size == rhs.size && index == rhs.index;
}
constexpr bool operator!=(RegisterX64 rhs) const
{
return !(*this == rhs);
}
};
constexpr RegisterX64 noreg{SizeX64::none, 16};
constexpr RegisterX64 rip{SizeX64::none, 0};
constexpr RegisterX64 al{SizeX64::byte, 0};
constexpr RegisterX64 cl{SizeX64::byte, 1};
constexpr RegisterX64 dl{SizeX64::byte, 2};
constexpr RegisterX64 bl{SizeX64::byte, 3};
constexpr RegisterX64 spl{SizeX64::byte, 4};
constexpr RegisterX64 bpl{SizeX64::byte, 5};
constexpr RegisterX64 sil{SizeX64::byte, 6};
constexpr RegisterX64 dil{SizeX64::byte, 7};
constexpr RegisterX64 r8b{SizeX64::byte, 8};
constexpr RegisterX64 r9b{SizeX64::byte, 9};
constexpr RegisterX64 r10b{SizeX64::byte, 10};
constexpr RegisterX64 r11b{SizeX64::byte, 11};
constexpr RegisterX64 r12b{SizeX64::byte, 12};
constexpr RegisterX64 r13b{SizeX64::byte, 13};
constexpr RegisterX64 r14b{SizeX64::byte, 14};
constexpr RegisterX64 r15b{SizeX64::byte, 15};
constexpr RegisterX64 eax{SizeX64::dword, 0};
constexpr RegisterX64 ecx{SizeX64::dword, 1};
constexpr RegisterX64 edx{SizeX64::dword, 2};
constexpr RegisterX64 ebx{SizeX64::dword, 3};
constexpr RegisterX64 esp{SizeX64::dword, 4};
constexpr RegisterX64 ebp{SizeX64::dword, 5};
constexpr RegisterX64 esi{SizeX64::dword, 6};
constexpr RegisterX64 edi{SizeX64::dword, 7};
constexpr RegisterX64 r8d{SizeX64::dword, 8};
constexpr RegisterX64 r9d{SizeX64::dword, 9};
constexpr RegisterX64 r10d{SizeX64::dword, 10};
constexpr RegisterX64 r11d{SizeX64::dword, 11};
constexpr RegisterX64 r12d{SizeX64::dword, 12};
constexpr RegisterX64 r13d{SizeX64::dword, 13};
constexpr RegisterX64 r14d{SizeX64::dword, 14};
constexpr RegisterX64 r15d{SizeX64::dword, 15};
constexpr RegisterX64 rax{SizeX64::qword, 0};
constexpr RegisterX64 rcx{SizeX64::qword, 1};
constexpr RegisterX64 rdx{SizeX64::qword, 2};
constexpr RegisterX64 rbx{SizeX64::qword, 3};
constexpr RegisterX64 rsp{SizeX64::qword, 4};
constexpr RegisterX64 rbp{SizeX64::qword, 5};
constexpr RegisterX64 rsi{SizeX64::qword, 6};
constexpr RegisterX64 rdi{SizeX64::qword, 7};
constexpr RegisterX64 r8{SizeX64::qword, 8};
constexpr RegisterX64 r9{SizeX64::qword, 9};
constexpr RegisterX64 r10{SizeX64::qword, 10};
constexpr RegisterX64 r11{SizeX64::qword, 11};
constexpr RegisterX64 r12{SizeX64::qword, 12};
constexpr RegisterX64 r13{SizeX64::qword, 13};
constexpr RegisterX64 r14{SizeX64::qword, 14};
constexpr RegisterX64 r15{SizeX64::qword, 15};
constexpr RegisterX64 xmm0{SizeX64::xmmword, 0};
constexpr RegisterX64 xmm1{SizeX64::xmmword, 1};
constexpr RegisterX64 xmm2{SizeX64::xmmword, 2};
constexpr RegisterX64 xmm3{SizeX64::xmmword, 3};
constexpr RegisterX64 xmm4{SizeX64::xmmword, 4};
constexpr RegisterX64 xmm5{SizeX64::xmmword, 5};
constexpr RegisterX64 xmm6{SizeX64::xmmword, 6};
constexpr RegisterX64 xmm7{SizeX64::xmmword, 7};
constexpr RegisterX64 xmm8{SizeX64::xmmword, 8};
constexpr RegisterX64 xmm9{SizeX64::xmmword, 9};
constexpr RegisterX64 xmm10{SizeX64::xmmword, 10};
constexpr RegisterX64 xmm11{SizeX64::xmmword, 11};
constexpr RegisterX64 xmm12{SizeX64::xmmword, 12};
constexpr RegisterX64 xmm13{SizeX64::xmmword, 13};
constexpr RegisterX64 xmm14{SizeX64::xmmword, 14};
constexpr RegisterX64 xmm15{SizeX64::xmmword, 15};
constexpr RegisterX64 ymm0{SizeX64::ymmword, 0};
constexpr RegisterX64 ymm1{SizeX64::ymmword, 1};
constexpr RegisterX64 ymm2{SizeX64::ymmword, 2};
constexpr RegisterX64 ymm3{SizeX64::ymmword, 3};
constexpr RegisterX64 ymm4{SizeX64::ymmword, 4};
constexpr RegisterX64 ymm5{SizeX64::ymmword, 5};
constexpr RegisterX64 ymm6{SizeX64::ymmword, 6};
constexpr RegisterX64 ymm7{SizeX64::ymmword, 7};
constexpr RegisterX64 ymm8{SizeX64::ymmword, 8};
constexpr RegisterX64 ymm9{SizeX64::ymmword, 9};
constexpr RegisterX64 ymm10{SizeX64::ymmword, 10};
constexpr RegisterX64 ymm11{SizeX64::ymmword, 11};
constexpr RegisterX64 ymm12{SizeX64::ymmword, 12};
constexpr RegisterX64 ymm13{SizeX64::ymmword, 13};
constexpr RegisterX64 ymm14{SizeX64::ymmword, 14};
constexpr RegisterX64 ymm15{SizeX64::ymmword, 15};
constexpr RegisterX64 byteReg(RegisterX64 reg)
{
return RegisterX64{SizeX64::byte, reg.index};
}
constexpr RegisterX64 wordReg(RegisterX64 reg)
{
return RegisterX64{SizeX64::word, reg.index};
}
constexpr RegisterX64 dwordReg(RegisterX64 reg)
{
return RegisterX64{SizeX64::dword, reg.index};
}
constexpr RegisterX64 qwordReg(RegisterX64 reg)
{
return RegisterX64{SizeX64::qword, reg.index};
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau
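
A brief compile-time sketch (assuming only this header) of the size casts above, which re-view the same register index at another operand size:

#include "Luau/RegisterX64.h"

using namespace Luau::CodeGen::X64;

static_assert(dwordReg(rax) == eax, "rax viewed at 32 bits is eax");
static_assert(byteReg(rcx) == cl, "rcx viewed at 8 bits is cl");
static_assert(qwordReg(r8d) == r8, "r8d widened back to 64 bits is r8");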

View File

@ -1,61 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/RegisterA64.h"
#include "Luau/RegisterX64.h"
#include <initializer_list>
#include <stddef.h>
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
// This value is used in 'finishFunction' to mark the function that spans to the end of the whole code block
static uint32_t kFullBlockFunction = ~0u;
class UnwindBuilder
{
public:
enum Arch
{
X64,
A64
};
virtual ~UnwindBuilder() = default;
virtual void setBeginOffset(size_t beginOffset) = 0;
virtual size_t getBeginOffset() const = 0;
virtual void startInfo(Arch arch) = 0;
virtual void startFunction() = 0;
virtual void finishFunction(uint32_t beginOffset, uint32_t endOffset) = 0;
virtual void finishInfo() = 0;
// A64-specific; prologue must look like this:
// sub sp, sp, stackSize
// store sequence that saves regs to [sp..sp+regs.size*8) in the order specified in regs; regs should start with x29, x30 (fp, lr)
// mov x29, sp
virtual void prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs) = 0;
// X64-specific; prologue must look like this:
// optional, indicated by setupFrame:
// push rbp
// mov rbp, rsp
// push reg in the order specified in regs
// sub rsp, stackSize
virtual void prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> regs) = 0;
virtual size_t getSize() const = 0;
virtual size_t getFunctionCount() const = 0;
// This will place the unwinding data at the target address and might update values of some fields
virtual void finalize(char* target, size_t offset, void* funcAddress, size_t funcSize) const = 0;
};
} // namespace CodeGen
} // namespace Luau
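
To make the prologueX64 contract concrete, here is a hypothetical pairing of an emitted prologue with its unwind description (a sketch only: 'unwind' is a UnwindBuilder*, 'codeSize' is the total function length, and the instruction sizes are illustrative):

// Emitted prologue (setupFrame == true, regs == {r12, r13}, stackSize == 40):
//   push rbp        ; 1 byte
//   mov  rbp, rsp   ; 3 bytes
//   push r12        ; 2 bytes
//   push r13        ; 2 bytes
//   sub  rsp, 40    ; 4 bytes -> prologueSize == 12
unwind->startFunction();
unwind->prologueX64(/* prologueSize= */ 12, /* stackSize= */ 40, /* setupFrame= */ true, {X64::r12, X64::r13});
unwind->finishFunction(/* beginOffset= */ 0, /* endOffset= */ codeSize);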

View File

@ -1,54 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/RegisterX64.h"
#include "UnwindBuilder.h"
#include <vector>
namespace Luau
{
namespace CodeGen
{
struct UnwindFunctionDwarf2
{
uint32_t beginOffset;
uint32_t endOffset;
uint32_t fdeEntryStartPos;
};
class UnwindBuilderDwarf2 : public UnwindBuilder
{
public:
void setBeginOffset(size_t beginOffset) override;
size_t getBeginOffset() const override;
void startInfo(Arch arch) override;
void startFunction() override;
void finishFunction(uint32_t beginOffset, uint32_t endOffset) override;
void finishInfo() override;
void prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs) override;
void prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> regs) override;
size_t getSize() const override;
size_t getFunctionCount() const override;
void finalize(char* target, size_t offset, void* funcAddress, size_t funcSize) const override;
private:
size_t beginOffset = 0;
std::vector<UnwindFunctionDwarf2> unwindFunctions;
static const unsigned kRawDataLimit = 1024;
uint8_t rawData[kRawDataLimit];
uint8_t* pos = rawData;
// We will remember the FDE location to write some of the fields like entry length, function start and size later
uint8_t* fdeEntryStart = nullptr;
};
} // namespace CodeGen
} // namespace Luau

View File

@ -1,78 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/RegisterX64.h"
#include "UnwindBuilder.h"
#include <vector>
namespace Luau
{
namespace CodeGen
{
// This struct matches the layout of x64 RUNTIME_FUNCTION from winnt.h
struct UnwindFunctionWin
{
uint32_t beginOffset;
uint32_t endOffset;
uint32_t unwindInfoOffset;
};
// This struct matches the layout of x64 UNWIND_INFO from ehdata.h
struct UnwindInfoWin
{
uint8_t version : 3;
uint8_t flags : 5;
uint8_t prologsize;
uint8_t unwindcodecount;
uint8_t framereg : 4;
uint8_t frameregoff : 4;
};
// This struct matches the layout of UNWIND_CODE from ehdata.h
struct UnwindCodeWin
{
uint8_t offset;
uint8_t opcode : 4;
uint8_t opinfo : 4;
};
class UnwindBuilderWin : public UnwindBuilder
{
public:
void setBeginOffset(size_t beginOffset) override;
size_t getBeginOffset() const override;
void startInfo(Arch arch) override;
void startFunction() override;
void finishFunction(uint32_t beginOffset, uint32_t endOffset) override;
void finishInfo() override;
void prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs) override;
void prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> regs) override;
size_t getSize() const override;
size_t getFunctionCount() const override;
void finalize(char* target, size_t offset, void* funcAddress, size_t funcSize) const override;
private:
size_t beginOffset = 0;
static const unsigned kRawDataLimit = 1024;
uint8_t rawData[kRawDataLimit];
uint8_t* rawDataPos = rawData;
std::vector<UnwindFunctionWin> unwindFunctions;
// Windows unwind codes are written in reverse, so we have to collect them all first
std::vector<UnwindCodeWin> unwindCodes;
uint8_t prologSize = 0;
X64::RegisterX64 frameReg = X64::noreg;
uint8_t frameRegOffset = 0;
};
} // namespace CodeGen
} // namespace Luau

View File

@ -1,18 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
// Can be used to reconfigure visibility/exports for public APIs
#ifndef LUACODEGEN_API
#define LUACODEGEN_API extern
#endif
struct lua_State;
// returns 1 if Luau code generator is supported, 0 otherwise
LUACODEGEN_API int luau_codegen_supported(void);
// create an instance of Luau code generator. you must check that this feature is supported using luau_codegen_supported().
LUACODEGEN_API void luau_codegen_create(lua_State* L);
// build target function and all inner functions
LUACODEGEN_API void luau_codegen_compile(lua_State* L, int idx);
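
A minimal usage sketch of this C API (error handling is elided; luau_load and state creation come from the Luau VM headers, not this file):

static void setupNativeCodegen(lua_State* L)
{
    // create the code generator once per lua_State, and only when supported
    if (luau_codegen_supported())
        luau_codegen_create(L);
}

// later, once luau_load has pushed a Luau function onto the stack:
//   luau_codegen_compile(L, -1);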

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,56 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stdint.h>
#ifdef _MSC_VER
#include <intrin.h>
#endif
namespace Luau
{
namespace CodeGen
{
inline int countlz(uint32_t n)
{
#ifdef _MSC_VER
unsigned long rl;
return _BitScanReverse(&rl, n) ? 31 - int(rl) : 32;
#else
return n == 0 ? 32 : __builtin_clz(n);
#endif
}
inline int countrz(uint32_t n)
{
#ifdef _MSC_VER
unsigned long rl;
return _BitScanForward(&rl, n) ? int(rl) : 32;
#else
return n == 0 ? 32 : __builtin_ctz(n);
#endif
}
inline int lrotate(uint32_t u, int s)
{
// MSVC doesn't recognize the rotate form that is UB-safe
#ifdef _MSC_VER
return _rotl(u, s);
#else
return (u << (s & 31)) | (u >> ((32 - s) & 31));
#endif
}
inline int rrotate(uint32_t u, int s)
{
// MSVC doesn't recognize the rotate form that is UB-safe
#ifdef _MSC_VER
return _rotr(u, s);
#else
return (u >> (s & 31)) | (u << ((32 - s) & 31));
#endif
}
} // namespace CodeGen
} // namespace Luau
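
A few concrete values for the helpers above (a sketch using plain asserts, assuming this header is included):

#include <cassert>

void bitUtilsExamples()
{
    using namespace Luau::CodeGen;

    assert(countlz(1u) == 31);              // only bit 0 set -> 31 leading zeros
    assert(countlz(0u) == 32);              // zero input is defined to return 32
    assert(countrz(0x80000000u) == 31);     // only bit 31 set -> 31 trailing zeros
    assert(lrotate(0x80000001u, 1) == 0x3); // the top bit wraps around to bit 0
    assert(rrotate(0x6u, 1) == 0x3);        // a plain right shift when no bits wrap
}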

View File

@ -1,80 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#if defined(LUAU_BIG_ENDIAN)
#include <endian.h>
#endif
#include <string.h>
inline uint8_t* writeu8(uint8_t* target, uint8_t value)
{
*target = value;
return target + sizeof(value);
}
inline uint8_t* writeu32(uint8_t* target, uint32_t value)
{
#if defined(LUAU_BIG_ENDIAN)
value = htole32(value);
#endif
memcpy(target, &value, sizeof(value));
return target + sizeof(value);
}
inline uint8_t* writeu64(uint8_t* target, uint64_t value)
{
#if defined(LUAU_BIG_ENDIAN)
value = htole64(value);
#endif
memcpy(target, &value, sizeof(value));
return target + sizeof(value);
}
inline uint8_t* writeuleb128(uint8_t* target, uint64_t value)
{
do
{
uint8_t byte = value & 0x7f;
value >>= 7;
if (value)
byte |= 0x80;
*target++ = byte;
} while (value);
return target;
}
inline uint8_t* writef32(uint8_t* target, float value)
{
#if defined(LUAU_BIG_ENDIAN)
static_assert(sizeof(float) == sizeof(uint32_t), "type size must match to reinterpret data");
uint32_t data;
memcpy(&data, &value, sizeof(value));
writeu32(target, data);
#else
memcpy(target, &value, sizeof(value));
#endif
return target + sizeof(value);
}
inline uint8_t* writef64(uint8_t* target, double value)
{
#if defined(LUAU_BIG_ENDIAN)
static_assert(sizeof(double) == sizeof(uint64_t), "type size must match to reinterpret data");
uint64_t data;
memcpy(&data, &value, sizeof(value));
writeu64(target, data);
#else
memcpy(target, &value, sizeof(value));
#endif
return target + sizeof(value);
}
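
A worked example for writeuleb128 above (a sketch): 300 == 0x12C encodes low seven bits first, with the 0x80 continuation bit set on every byte except the last.

#include <cassert>

void uleb128Example()
{
    uint8_t buf[2];
    uint8_t* end = writeuleb128(buf, 300);

    assert(end == buf + 2);
    assert(buf[0] == 0xAC); // 0x2C | 0x80: low seven bits plus the continuation bit
    assert(buf[1] == 0x02); // remaining bits: 300 >> 7 == 2
}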

View File

@ -1,211 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/CodeAllocator.h"
#include "Luau/Common.h"
#include <string.h>
#if defined(_WIN32)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <Windows.h>
const size_t kPageSize = 4096;
#else
#include <sys/mman.h>
#include <unistd.h>
#if defined(__FreeBSD__) && !(_POSIX_C_SOURCE >= 200112L)
const size_t kPageSize = getpagesize();
#else
const size_t kPageSize = sysconf(_SC_PAGESIZE);
#endif
#endif
static size_t alignToPageSize(size_t size)
{
return (size + kPageSize - 1) & ~(kPageSize - 1);
}
#if defined(_WIN32)
static uint8_t* allocatePages(size_t size)
{
return (uint8_t*)VirtualAlloc(nullptr, alignToPageSize(size), MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
}
static void freePages(uint8_t* mem, size_t size)
{
if (VirtualFree(mem, 0, MEM_RELEASE) == 0)
LUAU_ASSERT(!"failed to deallocate block memory");
}
static void makePagesExecutable(uint8_t* mem, size_t size)
{
LUAU_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
LUAU_ASSERT(size == alignToPageSize(size));
DWORD oldProtect;
if (VirtualProtect(mem, size, PAGE_EXECUTE_READ, &oldProtect) == 0)
LUAU_ASSERT(!"Failed to change page protection");
}
static void flushInstructionCache(uint8_t* mem, size_t size)
{
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
if (FlushInstructionCache(GetCurrentProcess(), mem, size) == 0)
LUAU_ASSERT(!"Failed to flush instruction cache");
#endif
}
#else
static uint8_t* allocatePages(size_t size)
{
return (uint8_t*)mmap(nullptr, alignToPageSize(size), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
}
static void freePages(uint8_t* mem, size_t size)
{
if (munmap(mem, alignToPageSize(size)) != 0)
LUAU_ASSERT(!"Failed to deallocate block memory");
}
static void makePagesExecutable(uint8_t* mem, size_t size)
{
LUAU_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
LUAU_ASSERT(size == alignToPageSize(size));
if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0)
LUAU_ASSERT(!"Failed to change page protection");
}
static void flushInstructionCache(uint8_t* mem, size_t size)
{
__builtin___clear_cache((char*)mem, (char*)mem + size);
}
#endif
namespace Luau
{
namespace CodeGen
{
CodeAllocator::CodeAllocator(size_t blockSize, size_t maxTotalSize)
: blockSize(blockSize)
, maxTotalSize(maxTotalSize)
{
LUAU_ASSERT(blockSize > kMaxReservedDataSize);
LUAU_ASSERT(maxTotalSize >= blockSize);
}
CodeAllocator::~CodeAllocator()
{
if (destroyBlockUnwindInfo)
{
for (void* unwindInfo : unwindInfos)
destroyBlockUnwindInfo(context, unwindInfo);
}
for (uint8_t* block : blocks)
freePages(block, blockSize);
}
bool CodeAllocator::allocate(
const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize, uint8_t*& result, size_t& resultSize, uint8_t*& resultCodeStart)
{
// 'Round up' to preserve code alignment
size_t alignedDataSize = (dataSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1);
size_t totalSize = alignedDataSize + codeSize;
// Function has to fit into a single block with unwinding information
if (totalSize > blockSize - kMaxReservedDataSize)
return false;
size_t startOffset = 0;
// We might need a new block
if (totalSize > size_t(blockEnd - blockPos))
{
if (!allocateNewBlock(startOffset))
return false;
LUAU_ASSERT(totalSize <= size_t(blockEnd - blockPos));
}
LUAU_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); // Allocation starts on page boundary
size_t dataOffset = startOffset + alignedDataSize - dataSize;
size_t codeOffset = startOffset + alignedDataSize;
if (dataSize)
memcpy(blockPos + dataOffset, data, dataSize);
if (codeSize)
memcpy(blockPos + codeOffset, code, codeSize);
size_t pageAlignedSize = alignToPageSize(startOffset + totalSize);
makePagesExecutable(blockPos, pageAlignedSize);
flushInstructionCache(blockPos + codeOffset, codeSize);
result = blockPos + startOffset;
resultSize = totalSize;
resultCodeStart = blockPos + codeOffset;
// Ensure that future allocations from the block start from a page boundary.
// This is important since we use W^X, and writing to the previous page would require briefly removing
// executable bit from it, which may result in access violations if that code is being executed concurrently.
if (pageAlignedSize <= size_t(blockEnd - blockPos))
{
blockPos += pageAlignedSize;
LUAU_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0);
LUAU_ASSERT(blockPos <= blockEnd);
}
else
{
// Future allocations will need to allocate fresh blocks
blockPos = blockEnd;
}
return true;
}
bool CodeAllocator::allocateNewBlock(size_t& unwindInfoSize)
{
// Stop allocating once we reach a global limit
if ((blocks.size() + 1) * blockSize > maxTotalSize)
return false;
uint8_t* block = allocatePages(blockSize);
if (!block)
return false;
blockPos = block;
blockEnd = block + blockSize;
blocks.push_back(block);
if (createBlockUnwindInfo)
{
void* unwindInfo = createBlockUnwindInfo(context, block, blockSize, unwindInfoSize);
// 'Round up' to preserve alignment of the following data and code
unwindInfoSize = (unwindInfoSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1);
LUAU_ASSERT(unwindInfoSize <= kMaxReservedDataSize);
if (!unwindInfo)
return false;
unwindInfos.push_back(unwindInfo);
}
return true;
}
} // namespace CodeGen
} // namespace Luau
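
The page-rounding used throughout this file is easy to sanity-check with a standalone constexpr mirror (a sketch that assumes a 4096-byte page; the real kPageSize is determined per platform above):

constexpr size_t alignTo4k(size_t size)
{
    return (size + 4095) & ~size_t(4095);
}

static_assert(alignTo4k(1) == 4096, "rounds up to a full page");
static_assert(alignTo4k(4096) == 4096, "exact multiples are unchanged");
static_assert(alignTo4k(4097) == 8192, "spills into the next page");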

View File

@ -1,121 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/CodeBlockUnwind.h"
#include "Luau/CodeAllocator.h"
#include "Luau/UnwindBuilder.h"
#include <string.h>
#include <stdlib.h>
#if defined(_WIN32) && defined(_M_X64)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <Windows.h>
#elif defined(__linux__) || defined(__APPLE__)
// Defined in unwind.h which may not be easily discoverable on various platforms
extern "C" void __register_frame(const void*);
extern "C" void __deregister_frame(const void*);
extern "C" void __unw_add_dynamic_fde() __attribute__((weak));
#endif
#if defined(__APPLE__) && defined(__aarch64__)
#include <sys/sysctl.h>
#endif
namespace Luau
{
namespace CodeGen
{
#if defined(__linux__) || defined(__APPLE__)
static void visitFdeEntries(char* pos, void (*cb)(const void*))
{
// When using glibc++ unwinder, we need to call __register_frame/__deregister_frame on the entire .eh_frame data
// When using libc++ unwinder (libunwind), each FDE has to be handled separately
// libc++ unwinder is the macOS unwinder, but on Linux the unwinder depends on the library the executable is linked with
// __unw_add_dynamic_fde is specific to libc++ unwinder, as such we determine the library based on its existence
if (__unw_add_dynamic_fde == nullptr)
return cb(pos);
for (;;)
{
unsigned partLength;
memcpy(&partLength, pos, sizeof(partLength));
if (partLength == 0) // Zero-length section signals completion
break;
unsigned partId;
memcpy(&partId, pos + 4, sizeof(partId));
if (partId != 0) // Skip CIE part
cb(pos); // CIE is found using an offset in FDE
pos += partLength + 4;
}
}
#endif
void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, size_t& beginOffset)
{
UnwindBuilder* unwind = (UnwindBuilder*)context;
// All unwinding related data is placed together at the start of the block
size_t unwindSize = unwind->getSize();
unwindSize = (unwindSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1); // Match code allocator alignment
LUAU_ASSERT(blockSize >= unwindSize);
char* unwindData = (char*)block;
unwind->finalize(unwindData, unwindSize, block, blockSize);
#if defined(_WIN32) && defined(_M_X64)
if (!RtlAddFunctionTable((RUNTIME_FUNCTION*)block, uint32_t(unwind->getFunctionCount()), uintptr_t(block)))
{
LUAU_ASSERT(!"Failed to allocate function table");
return nullptr;
}
#elif defined(__linux__) || defined(__APPLE__)
visitFdeEntries(unwindData, __register_frame);
#endif
beginOffset = unwindSize + unwind->getBeginOffset();
return block;
}
void destroyBlockUnwindInfo(void* context, void* unwindData)
{
#if defined(_WIN32) && defined(_M_X64)
if (!RtlDeleteFunctionTable((RUNTIME_FUNCTION*)unwindData))
LUAU_ASSERT(!"Failed to deallocate function table");
#elif defined(__linux__) || defined(__APPLE__)
visitFdeEntries((char*)unwindData, __deregister_frame);
#endif
}
bool isUnwindSupported()
{
#if defined(_WIN32) && defined(_M_X64)
return true;
#elif defined(__APPLE__) && defined(__aarch64__)
char ver[256];
size_t verLength = sizeof(ver);
// libunwind on macOS 12 and earlier (which maps to osrelease 21) assumes JIT frames use pointer authentication without a way to override that
return sysctlbyname("kern.osrelease", ver, &verLength, NULL, 0) == 0 && atoi(ver) >= 22;
#elif defined(__linux__) || defined(__APPLE__)
return true;
#else
return false;
#endif
}
} // namespace CodeGen
} // namespace Luau
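
For reference, every record that visitFdeEntries walks begins with the same eight-byte framing (a sketch of the .eh_frame layout only, mirroring the two memcpy reads in the loop above):

struct EhFrameRecordHeader
{
    uint32_t length; // size of the rest of the record; 0 terminates the section
    uint32_t id;     // 0 marks a CIE; an FDE stores a back-reference to its CIE here
};

// Hence 'pos += partLength + 4' advances past the body plus the length field itself,
// and only FDEs (id != 0) are handed to the callback individually.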

View File

@ -1,635 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/CodeGen.h"
#include "Luau/Common.h"
#include "Luau/CodeAllocator.h"
#include "Luau/CodeBlockUnwind.h"
#include "Luau/IrAnalysis.h"
#include "Luau/IrBuilder.h"
#include "Luau/IrDump.h"
#include "Luau/IrUtils.h"
#include "Luau/OptimizeConstProp.h"
#include "Luau/OptimizeFinalX64.h"
#include "Luau/UnwindBuilder.h"
#include "Luau/UnwindBuilderDwarf2.h"
#include "Luau/UnwindBuilderWin.h"
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "NativeState.h"
#include "CodeGenA64.h"
#include "EmitCommonA64.h"
#include "IrLoweringA64.h"
#include "CodeGenX64.h"
#include "EmitCommonX64.h"
#include "EmitInstructionX64.h"
#include "IrLoweringX64.h"
#include "lapi.h"
#include <algorithm>
#include <memory>
#include <optional>
#if defined(__x86_64__) || defined(_M_X64)
#ifdef _MSC_VER
#include <intrin.h> // __cpuid
#else
#include <cpuid.h> // __cpuid
#endif
#endif
#if defined(__aarch64__)
#ifdef __APPLE__
#include <sys/sysctl.h>
#endif
#endif
LUAU_FASTFLAGVARIABLE(DebugCodegenNoOpt, false)
LUAU_FASTFLAGVARIABLE(DebugCodegenOptSize, false)
LUAU_FASTFLAGVARIABLE(DebugCodegenSkipNumbering, false)
namespace Luau
{
namespace CodeGen
{
static const Instruction kCodeEntryInsn = LOP_NATIVECALL;
static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;
struct NativeProto
{
Proto* p;
void* execdata;
uintptr_t exectarget;
};
static NativeProto createNativeProto(Proto* proto, const IrBuilder& ir)
{
int sizecode = proto->sizecode;
uint32_t* instOffsets = new uint32_t[sizecode];
uint32_t instTarget = ir.function.bcMapping[0].asmLocation;
for (int i = 0; i < sizecode; i++)
{
LUAU_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
instOffsets[i] = ir.function.bcMapping[i].asmLocation - instTarget;
}
// entry target will be relocated when assembly is finalized
return {proto, instOffsets, instTarget};
}
static void destroyExecData(void* execdata)
{
delete[] static_cast<uint32_t*>(execdata);
}
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
LUAU_ASSERT(p->source);
const char* source = getstr(p->source);
source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";
char name[256];
snprintf(name, sizeof(name), "<luau> %s:%d %s", source, p->linedefined, p->debugname ? getstr(p->debugname) : "");
if (gPerfLogFn)
gPerfLogFn(gPerfLogContext, addr, size, name);
}
template<typename AssemblyBuilder, typename IrLowering>
static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, int bytecodeid, AssemblyOptions options)
{
// While we will need a better block ordering in the future, right now we want to mostly preserve build order with fallbacks outlined
std::vector<uint32_t> sortedBlocks;
sortedBlocks.reserve(function.blocks.size());
for (uint32_t i = 0; i < function.blocks.size(); i++)
sortedBlocks.push_back(i);
std::sort(sortedBlocks.begin(), sortedBlocks.end(), [&](uint32_t idxA, uint32_t idxB) {
const IrBlock& a = function.blocks[idxA];
const IrBlock& b = function.blocks[idxB];
// Place fallback blocks at the end
if ((a.kind == IrBlockKind::Fallback) != (b.kind == IrBlockKind::Fallback))
return (a.kind == IrBlockKind::Fallback) < (b.kind == IrBlockKind::Fallback);
// Try to order by instruction order
return a.sortkey < b.sortkey;
});
// For each IR instruction that begins a bytecode instruction, which bytecode instruction is it?
std::vector<uint32_t> bcLocations(function.instructions.size() + 1, ~0u);
for (size_t i = 0; i < function.bcMapping.size(); ++i)
{
uint32_t irLocation = function.bcMapping[i].irLocation;
if (irLocation != ~0u)
bcLocations[irLocation] = uint32_t(i);
}
bool outputEnabled = options.includeAssembly || options.includeIr;
IrToStringContext ctx{build.text, function.blocks, function.constants, function.cfg};
// We use this to skip outlined fallback blocks from IR/asm text output
size_t textSize = build.text.length();
uint32_t codeSize = build.getCodeSize();
bool seenFallback = false;
IrBlock dummy;
dummy.start = ~0u;
for (size_t i = 0; i < sortedBlocks.size(); ++i)
{
uint32_t blockIndex = sortedBlocks[i];
IrBlock& block = function.blocks[blockIndex];
if (block.kind == IrBlockKind::Dead)
continue;
LUAU_ASSERT(block.start != ~0u);
LUAU_ASSERT(block.finish != ~0u);
// If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
if (block.kind == IrBlockKind::Fallback && !seenFallback)
{
textSize = build.text.length();
codeSize = build.getCodeSize();
seenFallback = true;
}
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, /* includeUseInfo */ true);
}
// Values can only reference restore operands in the current block
function.validRestoreOpBlockIdx = blockIndex;
build.setLabel(block.label);
for (uint32_t index = block.start; index <= block.finish; index++)
{
LUAU_ASSERT(index < function.instructions.size());
uint32_t bcLocation = bcLocations[index];
// If IR instruction is the first one for the original bytecode, we can annotate it with source code text
if (outputEnabled && options.annotator && bcLocation != ~0u)
{
options.annotator(options.annotatorContext, build.text, bytecodeid, bcLocation);
}
// If bytecode needs the location of this instruction for jumps, record it
if (bcLocation != ~0u)
{
Label label = (index == block.start) ? block.label : build.setLabel();
function.bcMapping[bcLocation].asmLocation = build.getLabelOffset(label);
}
IrInst& inst = function.instructions[index];
// Skip pseudo instructions, but make sure they are not used at this stage
// This also prevents them from getting into text output when that's enabled
if (isPseudo(inst.cmd))
{
LUAU_ASSERT(inst.useCount == 0);
continue;
}
// Either the instruction result value is not referenced or the use count is not zero
LUAU_ASSERT(inst.lastUse == 0 || inst.useCount != 0);
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, inst, index, /* includeUseInfo */ true);
}
IrBlock& next = i + 1 < sortedBlocks.size() ? function.blocks[sortedBlocks[i + 1]] : dummy;
lowering.lowerInst(inst, index, next);
if (lowering.hasError())
{
// Place labels for all blocks that we're skipping
// This is needed to avoid AssemblyBuilder assertions about jumps in earlier blocks with unplaced labels
for (size_t j = i + 1; j < sortedBlocks.size(); ++j)
{
IrBlock& abandoned = function.blocks[sortedBlocks[j]];
build.setLabel(abandoned.label);
}
lowering.finishFunction();
return false;
}
}
lowering.finishBlock();
if (options.includeIr)
build.logAppend("#\n");
}
if (!seenFallback)
{
textSize = build.text.length();
codeSize = build.getCodeSize();
}
lowering.finishFunction();
if (outputEnabled && !options.includeOutlinedCode && textSize < build.text.size())
{
build.text.resize(textSize);
if (options.includeAssembly)
build.logAppend("; skipping %u bytes of outlined code\n", unsigned((build.getCodeSize() - codeSize) * sizeof(build.code[0])));
}
return true;
}
[[maybe_unused]] static bool lowerIr(
X64::AssemblyBuilderX64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
optimizeMemoryOperandsX64(ir.function);
X64::IrLoweringX64 lowering(build, helpers, data, ir.function);
return lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
}
[[maybe_unused]] static bool lowerIr(
A64::AssemblyBuilderA64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
A64::IrLoweringA64 lowering(build, helpers, data, ir.function);
return lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
}
template<typename AssemblyBuilder>
static std::optional<NativeProto> assembleFunction(AssemblyBuilder& build, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
if (options.includeAssembly || options.includeIr)
{
if (proto->debugname)
build.logAppend("; function %s(", getstr(proto->debugname));
else
build.logAppend("; function(");
for (int i = 0; i < proto->numparams; i++)
{
LocVar* var = proto->locvars ? &proto->locvars[proto->sizelocvars - proto->numparams + i] : nullptr;
if (var && var->varname)
build.logAppend("%s%s", i == 0 ? "" : ", ", getstr(var->varname));
else
build.logAppend("%s$arg%d", i == 0 ? "" : ", ", i);
}
if (proto->numparams != 0 && proto->is_vararg)
build.logAppend(", ...)");
else
build.logAppend(")");
if (proto->linedefined >= 0)
build.logAppend(" line %d\n", proto->linedefined);
else
build.logAppend("\n");
}
IrBuilder ir;
ir.buildFunctionIr(proto);
computeCfgInfo(ir.function);
if (!FFlag::DebugCodegenNoOpt)
{
bool useValueNumbering = !FFlag::DebugCodegenSkipNumbering;
constPropInBlockChains(ir, useValueNumbering);
if (!FFlag::DebugCodegenOptSize)
createLinearBlocks(ir, useValueNumbering);
}
if (!lowerIr(build, ir, data, helpers, proto, options))
{
if (build.logText)
build.logAppend("; skipping (can't lower)\n\n");
return std::nullopt;
}
if (build.logText)
build.logAppend("\n");
return createNativeProto(proto, ir);
}
static NativeState* getNativeState(lua_State* L)
{
return static_cast<NativeState*>(L->global->ecb.context);
}
static void onCloseState(lua_State* L)
{
delete getNativeState(L);
L->global->ecb = lua_ExecutionCallbacks();
}
static void onDestroyFunction(lua_State* L, Proto* proto)
{
destroyExecData(proto->execdata);
proto->execdata = nullptr;
proto->exectarget = 0;
proto->codeentry = proto->code;
}
static int onEnter(lua_State* L, Proto* proto)
{
NativeState* data = getNativeState(L);
LUAU_ASSERT(proto->execdata);
LUAU_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode);
uintptr_t target = proto->exectarget + static_cast<uint32_t*>(proto->execdata)[L->ci->savedpc - proto->code];
// Returns 1 to finish the function in the VM
return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
}
static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
{
if (!proto->execdata)
return;
LUAU_ASSERT(!"Native breakpoints are not implemented");
}
#if defined(__aarch64__)
static unsigned int getCpuFeaturesA64()
{
unsigned int result = 0;
#ifdef __APPLE__
int jscvt = 0;
size_t jscvtLen = sizeof(jscvt);
if (sysctlbyname("hw.optional.arm.FEAT_JSCVT", &jscvt, &jscvtLen, nullptr, 0) == 0 && jscvt == 1)
result |= A64::Feature_JSCVT;
#endif
return result;
}
#endif
bool isSupported()
{
if (!LUA_CUSTOM_EXECUTION)
return false;
if (LUA_EXTRA_SIZE != 1)
return false;
if (sizeof(TValue) != 16)
return false;
if (sizeof(LuaNode) != 32)
return false;
// Windows CRT uses stack unwinding in longjmp so we have to use unwind data; on other platforms, it's only necessary for C++ EH.
#if defined(_WIN32)
if (!isUnwindSupported())
return false;
#else
if (!LUA_USE_LONGJMP && !isUnwindSupported())
return false;
#endif
#if defined(__x86_64__) || defined(_M_X64)
int cpuinfo[4] = {};
#ifdef _MSC_VER
__cpuid(cpuinfo, 1);
#else
__cpuid(1, cpuinfo[0], cpuinfo[1], cpuinfo[2], cpuinfo[3]);
#endif
// We require AVX1 support for VEX encoded XMM operations
// We also require SSE4.1 support for ROUNDSD but the AVX check below covers it
// https://en.wikipedia.org/wiki/CPUID#EAX=1:_Processor_Info_and_Feature_Bits
if ((cpuinfo[2] & (1 << 28)) == 0)
return false;
return true;
#elif defined(__aarch64__)
return true;
#else
return false;
#endif
}
void create(lua_State* L)
{
LUAU_ASSERT(isSupported());
std::unique_ptr<NativeState> data = std::make_unique<NativeState>();
#if defined(_WIN32)
data->unwindBuilder = std::make_unique<UnwindBuilderWin>();
#else
data->unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
#endif
data->codeAllocator.context = data->unwindBuilder.get();
data->codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
data->codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;
initFunctions(*data);
#if defined(__x86_64__) || defined(_M_X64)
if (!X64::initHeaderFunctions(*data))
return;
#elif defined(__aarch64__)
if (!A64::initHeaderFunctions(*data))
return;
#endif
if (gPerfLogFn)
gPerfLogFn(gPerfLogContext, uintptr_t(data->context.gateEntry), 4096, "<luau gate>");
lua_ExecutionCallbacks* ecb = &L->global->ecb;
ecb->context = data.release();
ecb->close = onCloseState;
ecb->destroy = onDestroyFunction;
ecb->enter = onEnter;
ecb->setbreakpoint = onSetBreakpoint;
}
static void gatherFunctions(std::vector<Proto*>& results, Proto* proto)
{
if (results.size() <= size_t(proto->bytecodeid))
results.resize(proto->bytecodeid + 1);
// Skip protos that we've already compiled in this run: this happens because at -O2, inlined functions get their protos reused
if (results[proto->bytecodeid])
return;
results[proto->bytecodeid] = proto;
for (int i = 0; i < proto->sizep; i++)
gatherFunctions(results, proto->p[i]);
}
void compile(lua_State* L, int idx)
{
LUAU_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
// If initialization has failed, do not compile any functions
NativeState* data = getNativeState(L);
if (!data)
return;
#if defined(__aarch64__)
A64::AssemblyBuilderA64 build(/* logText= */ false, getCpuFeaturesA64());
#else
X64::AssemblyBuilderX64 build(/* logText= */ false);
#endif
std::vector<Proto*> protos;
gatherFunctions(protos, clvalue(func)->l.p);
ModuleHelpers helpers;
#if defined(__aarch64__)
A64::assembleHelpers(build, helpers);
#else
X64::assembleHelpers(build, helpers);
#endif
std::vector<NativeProto> results;
results.reserve(protos.size());
// Skip protos that have been compiled during previous invocations of CodeGen::compile
for (Proto* p : protos)
if (p && p->execdata == nullptr)
if (std::optional<NativeProto> np = assembleFunction(build, *data, helpers, p, {}))
results.push_back(*np);
// Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module
if (!build.finalize())
{
for (NativeProto result : results)
destroyExecData(result.execdata);
return;
}
// If no functions were assembled, we don't need to allocate/copy executable pages for helpers
if (results.empty())
return;
uint8_t* nativeData = nullptr;
size_t sizeNativeData = 0;
uint8_t* codeStart = nullptr;
if (!data->codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
int(build.code.size() * sizeof(build.code[0])), nativeData, sizeNativeData, codeStart))
{
for (NativeProto result : results)
destroyExecData(result.execdata);
return;
}
if (gPerfLogFn && results.size() > 0)
{
gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), uint32_t(results[0].exectarget), "<luau helpers>");
for (size_t i = 0; i < results.size(); ++i)
{
uint32_t begin = uint32_t(results[i].exectarget);
uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0]));
LUAU_ASSERT(begin < end);
logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin);
}
}
for (NativeProto result : results)
{
// the memory is now managed by VM and will be freed via onDestroyFunction
result.p->execdata = result.execdata;
result.p->exectarget = uintptr_t(codeStart) + result.exectarget;
result.p->codeentry = &kCodeEntryInsn;
}
}
std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
{
LUAU_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
#if defined(__aarch64__)
A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, getCpuFeaturesA64());
#else
X64::AssemblyBuilderX64 build(/* logText= */ options.includeAssembly);
#endif
NativeState data;
initFunctions(data);
std::vector<Proto*> protos;
gatherFunctions(protos, clvalue(func)->l.p);
ModuleHelpers helpers;
#if defined(__aarch64__)
A64::assembleHelpers(build, helpers);
#else
X64::assembleHelpers(build, helpers);
#endif
if (!options.includeOutlinedCode && options.includeAssembly)
{
build.text.clear();
build.logAppend("; skipping %u bytes of outlined helpers\n", unsigned(build.getCodeSize() * sizeof(build.code[0])));
}
for (Proto* p : protos)
if (p)
if (std::optional<NativeProto> np = assembleFunction(build, data, helpers, p, options))
destroyExecData(np->execdata);
if (!build.finalize())
return std::string();
if (options.outputBinary)
return std::string(reinterpret_cast<const char*>(build.code.data()), reinterpret_cast<const char*>(build.code.data() + build.code.size())) +
std::string(build.data.begin(), build.data.end());
else
return build.text;
}
void setPerfLog(void* context, PerfLogFn logFn)
{
gPerfLogContext = context;
gPerfLogFn = logFn;
}
} // namespace CodeGen
} // namespace Luau
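
A minimal sketch of hooking the perf-log callback declared above (the callback body and output format are illustrative; the signature matches how gPerfLogFn is invoked in this file):

#include <cstdint>
#include <cstdio>

static void onPerfLog(void* context, uintptr_t addr, unsigned size, const char* symbol)
{
    // one "<addr> <size> <name>" line per JIT-compiled function
    std::fprintf(stderr, "%zx %x %s\n", size_t(addr), size, symbol);
}

// during startup, before any functions are compiled:
//   Luau::CodeGen::setPerfLog(nullptr, onPerfLog);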

View File

@ -1,317 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenA64.h"
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/UnwindBuilder.h"
#include "BitUtils.h"
#include "NativeState.h"
#include "EmitCommonA64.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
namespace A64
{
struct EntryLocations
{
Label start;
Label prologueEnd;
Label epilogueStart;
};
static void emitExit(AssemblyBuilderA64& build, bool continueInVm)
{
build.mov(x0, continueInVm);
build.ldr(x1, mem(rNativeContext, offsetof(NativeContext, gateExit)));
build.br(x1);
}
static void emitInterrupt(AssemblyBuilderA64& build)
{
// x0 = pc offset
// x1 = return address in native code
Label skip;
// Stash return address in rBase; we need to reload rBase anyway
build.mov(rBase, x1);
// Load interrupt handler; it may be nullptr in case the update raced with the check before we got here
build.ldr(x2, mem(rState, offsetof(lua_State, global)));
build.ldr(x2, mem(x2, offsetof(global_State, cb.interrupt)));
build.cbz(x2, skip);
// Update savedpc; required in case interrupt errors
build.add(x0, rCode, x0);
build.ldr(x1, mem(rState, offsetof(lua_State, ci)));
build.str(x0, mem(x1, offsetof(CallInfo, savedpc)));
// Call interrupt
build.mov(x0, rState);
build.mov(w1, -1);
build.blr(x2);
// Check if we need to exit
build.ldrb(w0, mem(rState, offsetof(lua_State, status)));
build.cbz(w0, skip);
// L->ci->savedpc--
// note: recomputing this avoids having to stash x0
build.ldr(x1, mem(rState, offsetof(lua_State, ci)));
build.ldr(x0, mem(x1, offsetof(CallInfo, savedpc)));
build.sub(x0, x0, sizeof(Instruction));
build.str(x0, mem(x1, offsetof(CallInfo, savedpc)));
emitExit(build, /* continueInVm */ false);
build.setLabel(skip);
// Return back to caller; rBase has stashed return address
build.mov(x0, rBase);
emitUpdateBase(build); // interrupt may have reallocated stack
build.br(x0);
}
static void emitReentry(AssemblyBuilderA64& build, ModuleHelpers& helpers)
{
// x0 = closure object to re-enter (equal to clvalue(L->ci->func))
// If the fallback requested an exit, we need to do this right away
build.cbz(x0, helpers.exitNoContinueVm);
emitUpdateBase(build);
// Need to update state of the current function before we jump away
build.ldr(x1, mem(x0, offsetof(Closure, l.p))); // cl->l.p aka proto
build.ldr(x2, mem(rState, offsetof(lua_State, ci))); // L->ci
// We need to check if the new frame can be executed natively
// TODO: .flags and .savedpc load below can be fused with ldp
build.ldr(w3, mem(x2, offsetof(CallInfo, flags)));
build.tbz(x3, countrz(LUA_CALLINFO_NATIVE), helpers.exitContinueVm);
build.mov(rClosure, x0);
LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
// Get instruction index from instruction pointer
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
build.ldr(x2, mem(x2, offsetof(CallInfo, savedpc))); // L->ci->savedpc
build.sub(x2, x2, rCode);
// Get new instruction location and jump to it
LUAU_ASSERT(offsetof(Proto, exectarget) == offsetof(Proto, execdata) + 8);
build.ldp(x3, x4, mem(x1, offsetof(Proto, execdata)));
build.ldr(w2, mem(x3, x2));
build.add(x4, x4, x2);
build.br(x4);
}
void emitReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers)
{
// x1 = res
// w2 = number of written values
// x0 = ci
build.ldr(x0, mem(rState, offsetof(lua_State, ci)));
// w3 = ci->nresults
build.ldr(w3, mem(x0, offsetof(CallInfo, nresults)));
Label skipResultCopy;
// Fill the rest of the expected results (nresults - written) with 'nil'
build.cmp(w2, w3);
build.b(ConditionA64::GreaterEqual, skipResultCopy);
// TODO: cmp above could compute this and flags using subs
build.sub(w2, w3, w2); // counter = nresults - written
build.mov(w4, LUA_TNIL);
Label repeatNilLoop = build.setLabel();
build.str(w4, mem(x1, offsetof(TValue, tt)));
build.add(x1, x1, sizeof(TValue));
build.sub(w2, w2, 1);
build.cbnz(w2, repeatNilLoop);
build.setLabel(skipResultCopy);
// x2 = cip = ci - 1
build.sub(x2, x0, sizeof(CallInfo));
// res = cip->top when nresults >= 0
Label skipFixedRetTop;
build.tbnz(w3, 31, skipFixedRetTop);
build.ldr(x1, mem(x2, offsetof(CallInfo, top))); // res = cip->top
build.setLabel(skipFixedRetTop);
// Update VM state (ci, base, top)
build.str(x2, mem(rState, offsetof(lua_State, ci))); // L->ci = cip
build.ldr(rBase, mem(x2, offsetof(CallInfo, base))); // sync base = L->base while we have a chance
build.str(rBase, mem(rState, offsetof(lua_State, base))); // L->base = cip->base
build.str(x1, mem(rState, offsetof(lua_State, top))); // L->top = res
// Unlikely, but this might be the last return from VM
build.ldr(w4, mem(x0, offsetof(CallInfo, flags)));
build.tbnz(w4, countrz(LUA_CALLINFO_RETURN), helpers.exitNoContinueVm);
// Continue in interpreter if function has no native data
build.ldr(w4, mem(x2, offsetof(CallInfo, flags)));
build.tbz(w4, countrz(LUA_CALLINFO_NATIVE), helpers.exitContinueVm);
// Need to update state of the current function before we jump away
build.ldr(rClosure, mem(x2, offsetof(CallInfo, func)));
build.ldr(rClosure, mem(rClosure, offsetof(TValue, value.gc)));
build.ldr(x1, mem(rClosure, offsetof(Closure, l.p))); // cl->l.p aka proto
LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
// Get instruction index from instruction pointer
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
build.ldr(x2, mem(x2, offsetof(CallInfo, savedpc))); // cip->savedpc
build.sub(x2, x2, rCode);
// Get new instruction location and jump to it
LUAU_ASSERT(offsetof(Proto, exectarget) == offsetof(Proto, execdata) + 8);
build.ldp(x3, x4, mem(x1, offsetof(Proto, execdata)));
build.ldr(w2, mem(x3, x2));
build.add(x4, x4, x2);
build.br(x4);
}
static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilder& unwind)
{
EntryLocations locations;
// Arguments: x0 = lua_State*, x1 = Proto*, x2 = native code pointer to jump to, x3 = NativeContext*
locations.start = build.setLabel();
// prologue
build.sub(sp, sp, kStackSize);
build.stp(x29, x30, mem(sp)); // fp, lr
// stash non-volatile registers used for execution environment
build.stp(x19, x20, mem(sp, 16));
build.stp(x21, x22, mem(sp, 32));
build.stp(x23, x24, mem(sp, 48));
build.mov(x29, sp); // this is only necessary if we maintain frame pointers, which we do in the JIT for now
locations.prologueEnd = build.setLabel();
uint32_t prologueSize = build.getLabelOffset(locations.prologueEnd) - build.getLabelOffset(locations.start);
// Setup native execution environment
build.mov(rState, x0);
build.mov(rNativeContext, x3);
build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base
LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
build.ldr(x9, mem(x0, offsetof(lua_State, ci))); // L->ci
build.ldr(x9, mem(x9, offsetof(CallInfo, func))); // L->ci->func
build.ldr(rClosure, mem(x9, offsetof(TValue, value.gc))); // L->ci->func->value.gc aka cl
// Jump to the specified instruction; further control flow will be handled with custom ABI with register setup from EmitCommonA64.h
build.br(x2);
// Even though we jumped away, we will return here in the end
locations.epilogueStart = build.setLabel();
// Cleanup and exit
build.ldp(x23, x24, mem(sp, 48));
build.ldp(x21, x22, mem(sp, 32));
build.ldp(x19, x20, mem(sp, 16));
build.ldp(x29, x30, mem(sp)); // fp, lr
build.add(sp, sp, kStackSize);
build.ret();
// Our entry function is special: it spans the whole remaining code area
unwind.startFunction();
unwind.prologueA64(prologueSize, kStackSize, {x29, x30, x19, x20, x21, x22, x23, x24});
unwind.finishFunction(build.getLabelOffset(locations.start), kFullBlockFunction);
return locations;
}
bool initHeaderFunctions(NativeState& data)
{
AssemblyBuilderA64 build(/* logText= */ false);
UnwindBuilder& unwind = *data.unwindBuilder.get();
unwind.startInfo(UnwindBuilder::A64);
EntryLocations entryLocations = buildEntryFunction(build, unwind);
build.finalize();
unwind.finishInfo();
LUAU_ASSERT(build.data.empty());
uint8_t* codeStart = nullptr;
if (!data.codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
int(build.code.size() * sizeof(build.code[0])), data.gateData, data.gateDataSize, codeStart))
{
LUAU_ASSERT(!"Failed to create entry function");
return false;
}
// Set the offset at the beginning so that functions in new blocks will not overlay the locations
// specified by the unwind information of the entry function
unwind.setBeginOffset(build.getLabelOffset(entryLocations.prologueEnd));
data.context.gateEntry = codeStart + build.getLabelOffset(entryLocations.start);
data.context.gateExit = codeStart + build.getLabelOffset(entryLocations.epilogueStart);
return true;
}
void assembleHelpers(AssemblyBuilderA64& build, ModuleHelpers& helpers)
{
if (build.logText)
build.logAppend("; exitContinueVm\n");
build.setLabel(helpers.exitContinueVm);
emitExit(build, /* continueInVm */ true);
if (build.logText)
build.logAppend("; exitNoContinueVm\n");
build.setLabel(helpers.exitNoContinueVm);
emitExit(build, /* continueInVm */ false);
if (build.logText)
build.logAppend("; reentry\n");
build.setLabel(helpers.reentry);
emitReentry(build, helpers);
if (build.logText)
build.logAppend("; interrupt\n");
build.setLabel(helpers.interrupt);
emitInterrupt(build);
if (build.logText)
build.logAppend("; return\n");
build.setLabel(helpers.return_);
emitReturn(build, helpers);
}
} // namespace A64
} // namespace CodeGen
} // namespace Luau

View File

@ -1,22 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
struct NativeState;
struct ModuleHelpers;
namespace A64
{
class AssemblyBuilderA64;
bool initHeaderFunctions(NativeState& data);
void assembleHelpers(AssemblyBuilderA64& build, ModuleHelpers& helpers);
} // namespace A64
} // namespace CodeGen
} // namespace Luau

View File

@ -1,917 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenUtils.h"
#include "lvm.h"
#include "lbuiltins.h"
#include "lbytecode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lnumutils.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include <string.h>
// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT
// This makes sure that we save the pc (in case the Lua call needs to generate a backtrace) before the call,
// and restores the stack pointer after in case stack gets reallocated
// Should only be used on the slow paths.
#define VM_PROTECT(x) \
{ \
L->ci->savedpc = pc; \
{ \
x; \
}; \
base = L->base; \
}
// Some external functions can cause an error, but never reallocate the stack; for these, VM_PROTECT_PC() is
// a cheaper version of VM_PROTECT that can be called before the external call.
#define VM_PROTECT_PC() L->ci->savedpc = pc
#define VM_REG(i) (LUAU_ASSERT(unsigned(i) < unsigned(L->top - base)), &base[i])
#define VM_KV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->l.p->sizek)), &k[i])
#define VM_UV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->nupvalues)), &cl->l.uprefs[i])
#define VM_PATCH_C(pc, slot) *const_cast<Instruction*>(pc) = ((uint8_t(slot) << 24) | (0x00ffffffu & *(pc)))
#define VM_PATCH_E(pc, slot) *const_cast<Instruction*>(pc) = ((uint32_t(slot) << 8) | (0x000000ffu & *(pc)))
#define VM_INTERRUPT() \
{ \
void (*interrupt)(lua_State*, int) = L->global->cb.interrupt; \
if (LUAU_UNLIKELY(!!interrupt)) \
{ /* the interrupt hook is called right before we advance pc */ \
VM_PROTECT(L->ci->savedpc++; interrupt(L, -1)); \
if (L->status != 0) \
{ \
L->ci->savedpc--; \
return NULL; \
} \
} \
}
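// Usage sketch (hypothetical helper, not part of the original file): a slow
// path wraps any call that can error or reallocate the stack in VM_PROTECT,
// so pc is saved before the call and the cached base pointer is reloaded after.
static void exampleProtectedLen(lua_State* L, const Instruction* pc, StkId base, StkId ra, const TValue* rb)
{
    VM_PROTECT(luaV_dolen(L, ra, rb)); // may run metamethods and move the stack
    (void)base;                        // base now points into the (possibly reallocated) stack
}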
namespace Luau
{
namespace CodeGen
{
bool forgLoopTableIter(lua_State* L, Table* h, int index, TValue* ra)
{
int sizearray = h->sizearray;
// first we advance index through the array portion
while (unsigned(index) < unsigned(sizearray))
{
TValue* e = &h->array[index];
if (!ttisnil(e))
{
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
setnvalue(ra + 3, double(index + 1));
setobj2s(L, ra + 4, e);
return true;
}
index++;
}
int sizenode = 1 << h->lsizenode;
// then we advance index through the hash portion
while (unsigned(index - h->sizearray) < unsigned(sizenode))
{
LuaNode* n = &h->node[index - sizearray];
if (!ttisnil(gval(n)))
{
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
getnodekey(L, ra + 3, n);
setobj(L, ra + 4, gval(n));
return true;
}
index++;
}
return false;
}
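// The hidden iteration state in ra+2 is the next 0-based slot to resume from,
// packed into a light userdata pointer so no allocation is needed; ra+3 carries
// the user-visible key. A hypothetical decoding helper, for illustration only:
static int nextIterSlot(const TValue* ra2)
{
    return int(uintptr_t(pvalue(ra2)));
}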
bool forgLoopNodeIter(lua_State* L, Table* h, int index, TValue* ra)
{
int sizearray = h->sizearray;
int sizenode = 1 << h->lsizenode;
// we only need to advance index through the hash portion here
while (unsigned(index - sizearray) < unsigned(sizenode))
{
LuaNode* n = &h->node[index - sizearray];
if (!ttisnil(gval(n)))
{
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
getnodekey(L, ra + 3, n);
setobj(L, ra + 4, gval(n));
return true;
}
index++;
}
return false;
}
bool forgLoopNonTableFallback(lua_State* L, int insnA, int aux)
{
TValue* base = L->base;
TValue* ra = VM_REG(insnA);
// note: it's safe to push arguments past top for complicated reasons (see lvmexecute.cpp)
setobj2s(L, ra + 3 + 2, ra + 2);
setobj2s(L, ra + 3 + 1, ra + 1);
setobj2s(L, ra + 3, ra);
L->top = ra + 3 + 3; // func + 2 args (state and index)
LUAU_ASSERT(L->top <= L->stack_last);
luaD_call(L, ra + 3, uint8_t(aux));
L->top = L->ci->top;
// recompute ra since stack might have been reallocated
base = L->base;
ra = VM_REG(insnA);
// copy first variable back into the iteration index
setobj2s(L, ra + 2, ra + 3);
return !ttisnil(ra + 3);
}
void forgPrepXnextFallback(lua_State* L, TValue* ra, int pc)
{
if (!ttisfunction(ra))
{
Closure* cl = clvalue(L->ci->func);
L->ci->savedpc = cl->l.p->code + pc;
luaG_typeerror(L, ra, "iterate over");
}
}
Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults)
{
// slow-path: not a function call
if (LUAU_UNLIKELY(!ttisfunction(ra)))
{
luaV_tryfuncTM(L, ra);
argtop++; // __call adds an extra self
}
Closure* ccl = clvalue(ra);
CallInfo* ci = incr_ci(L);
ci->func = ra;
ci->base = ra + 1;
ci->top = argtop + ccl->stacksize; // note: technically UB since we haven't reallocated the stack yet
ci->savedpc = NULL;
ci->flags = 0;
ci->nresults = nresults;
L->base = ci->base;
L->top = argtop;
// note: this reallocs stack, but we don't need to VM_PROTECT this
// this is because we're going to modify base/savedpc manually anyhow
// crucially, we can't use ra/argtop after this line
luaD_checkstack(L, ccl->stacksize);
return ccl;
}
void callEpilogC(lua_State* L, int nresults, int n)
{
// ci is our callinfo, cip is our parent
CallInfo* ci = L->ci;
CallInfo* cip = ci - 1;
// copy return values into parent stack (but only up to nresults!), fill the rest with nil
// note: in MULTRET context nresults starts as -1, so the i != 0 condition intentionally never terminates the copy early
StkId res = ci->func;
StkId vali = L->top - n;
StkId valend = L->top;
int i;
for (i = nresults; i != 0 && vali < valend; i--)
setobj2s(L, res++, vali++);
while (i-- > 0)
setnilvalue(res++);
// pop the stack frame
L->ci = cip;
L->base = cip->base;
L->top = (nresults == LUA_MULTRET) ? res : cip->top;
}
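// Standalone model (plain ints rather than TValues) of the copy/fill loop above:
// copy up to nresults values, pad the rest with nil; nresults == -1 (MULTRET)
// copies exactly the n values that were produced and pads nothing.
static int copyResultsModel(int* dst, const int* src, int n, int nresults)
{
    int copied = 0;
    int i = nresults;
    for (; i != 0 && copied < n; i--, copied++)
        dst[copied] = src[copied];
    while (i-- > 0)
        dst[copied++] = 0; // 0 stands in for nil
    return copied;
}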
// Extracted as-is from lvmexecute.cpp with the exception of control flow (reentry) and removed interrupts/savedpc
Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults)
{
// slow-path: not a function call
if (LUAU_UNLIKELY(!ttisfunction(ra)))
{
luaV_tryfuncTM(L, ra);
argtop++; // __call adds an extra self
}
Closure* ccl = clvalue(ra);
CallInfo* ci = incr_ci(L);
ci->func = ra;
ci->base = ra + 1;
ci->top = argtop + ccl->stacksize; // note: technically UB since we haven't reallocated the stack yet
ci->savedpc = NULL;
ci->flags = 0;
ci->nresults = nresults;
L->base = ci->base;
L->top = argtop;
// note: this reallocs stack, but we don't need to VM_PROTECT this
// this is because we're going to modify base/savedpc manually anyhow
// crucially, we can't use ra/argtop after this line
luaD_checkstack(L, ccl->stacksize);
LUAU_ASSERT(ci->top <= L->stack_last);
if (!ccl->isC)
{
Proto* p = ccl->l.p;
// fill unused parameters with nil
StkId argi = L->top;
StkId argend = L->base + p->numparams;
while (argi < argend)
setnilvalue(argi++); // complete missing arguments
L->top = p->is_vararg ? argi : ci->top;
// keep executing new function
ci->savedpc = p->code;
if (LUAU_LIKELY(p->execdata != NULL))
ci->flags = LUA_CALLINFO_NATIVE;
return ccl;
}
else
{
lua_CFunction func = ccl->c.f;
int n = func(L);
// yield
if (n < 0)
return NULL;
// ci is our callinfo, cip is our parent
CallInfo* ci = L->ci;
CallInfo* cip = ci - 1;
// copy return values into parent stack (but only up to nresults!), fill the rest with nil
// note: in MULTRET context nresults starts as -1, so the i != 0 condition intentionally never terminates the copy early
StkId res = ci->func;
StkId vali = L->top - n;
StkId valend = L->top;
int i;
for (i = nresults; i != 0 && vali < valend; i--)
setobj2s(L, res++, vali++);
while (i-- > 0)
setnilvalue(res++);
// pop the stack frame
L->ci = cip;
L->base = cip->base;
L->top = (nresults == LUA_MULTRET) ? res : cip->top;
// keep executing current function
LUAU_ASSERT(isLua(cip));
return clvalue(cip->func);
}
}
const Instruction* executeGETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
uint32_t aux = *pc++;
TValue* kv = VM_KV(aux);
LUAU_ASSERT(ttisstring(kv));
// fast-path should already have been checked, so we skip checking for it here
Table* h = cl->env;
int slot = LUAU_INSN_C(insn) & h->nodemask8;
// slow-path, may invoke Lua calls via __index metamethod
TValue g;
sethvalue(L, &g, h);
L->cachedslot = slot;
VM_PROTECT(luaV_gettable(L, &g, kv, ra));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
return pc;
}
const Instruction* executeSETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
uint32_t aux = *pc++;
TValue* kv = VM_KV(aux);
LUAU_ASSERT(ttisstring(kv));
// fast-path should already have been checked, so we skip checking for it here
Table* h = cl->env;
int slot = LUAU_INSN_C(insn) & h->nodemask8;
// slow-path, may invoke Lua calls via __newindex metamethod
TValue g;
sethvalue(L, &g, h);
L->cachedslot = slot;
VM_PROTECT(luaV_settable(L, &g, kv, ra));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
return pc;
}
const Instruction* executeGETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
uint32_t aux = *pc++;
TValue* kv = VM_KV(aux);
LUAU_ASSERT(ttisstring(kv));
// fast-path: built-in table
if (ttistable(rb))
{
Table* h = hvalue(rb);
int slot = LUAU_INSN_C(insn) & h->nodemask8;
LuaNode* n = &h->node[slot];
// fast-path: value is in expected slot
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
{
setobj2s(L, ra, gval(n));
return pc;
}
else if (!h->metatable)
{
// fast-path: value is not in expected slot, but the table lookup doesn't involve metatable
const TValue* res = luaH_getstr(h, tsvalue(kv));
if (res != luaO_nilobject)
{
int cachedslot = gval2slot(h, res);
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, cachedslot);
}
setobj2s(L, ra, res);
return pc;
}
else
{
// slow-path, may invoke Lua calls via __index metamethod
L->cachedslot = slot;
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
return pc;
}
}
else
{
// fast-path: user data with C __index TM
const TValue* fn = 0;
if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_INDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
{
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
StkId top = L->top;
setobj2s(L, top + 0, fn);
setobj2s(L, top + 1, rb);
setobj2s(L, top + 2, kv);
L->top = top + 3;
L->cachedslot = LUAU_INSN_C(insn);
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
return pc;
}
else if (ttisvector(rb))
{
// fast-path: quick case-insensitive comparison with "X"/"Y"/"Z"
const char* name = getstr(tsvalue(kv));
int ic = (name[0] | ' ') - 'x';
#if LUA_VECTOR_SIZE == 4
// 'w' is before 'x' in ASCII, so ic is -1 when indexing with 'w'
if (ic == -1)
ic = 3;
#endif
if (unsigned(ic) < LUA_VECTOR_SIZE && name[1] == '\0')
{
const float* v = rb->value.v; // silences ubsan when indexing v[]
setnvalue(ra, v[ic]);
return pc;
}
fn = fasttm(L, L->global->mt[LUA_TVECTOR], TM_INDEX);
if (fn && ttisfunction(fn) && clvalue(fn)->isC)
{
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
StkId top = L->top;
setobj2s(L, top + 0, fn);
setobj2s(L, top + 1, rb);
setobj2s(L, top + 2, kv);
L->top = top + 3;
L->cachedslot = LUAU_INSN_C(insn);
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
return pc;
}
// fall through to slow path
}
// fall through to slow path
}
// slow-path, may invoke Lua calls via __index metamethod
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
return pc;
}
const Instruction* executeSETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
uint32_t aux = *pc++;
TValue* kv = VM_KV(aux);
LUAU_ASSERT(ttisstring(kv));
// fast-path: built-in table
if (ttistable(rb))
{
Table* h = hvalue(rb);
int slot = LUAU_INSN_C(insn) & h->nodemask8;
LuaNode* n = &h->node[slot];
// fast-path: value is in expected slot
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
{
setobj2t(L, gval(n), ra);
luaC_barriert(L, h, ra);
return pc;
}
else if (fastnotm(h->metatable, TM_NEWINDEX) && !h->readonly)
{
VM_PROTECT_PC(); // set may fail
TValue* res = luaH_setstr(L, h, tsvalue(kv));
int cachedslot = gval2slot(h, res);
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, cachedslot);
setobj2t(L, res, ra);
luaC_barriert(L, h, ra);
return pc;
}
else
{
// slow-path, may invoke Lua calls via __newindex metamethod
L->cachedslot = slot;
VM_PROTECT(luaV_settable(L, rb, kv, ra));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
return pc;
}
}
else
{
// fast-path: user data with C __newindex TM
const TValue* fn = 0;
if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
{
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
LUAU_ASSERT(L->top + 4 < L->stack + L->stacksize);
StkId top = L->top;
setobj2s(L, top + 0, fn);
setobj2s(L, top + 1, rb);
setobj2s(L, top + 2, kv);
setobj2s(L, top + 3, ra);
L->top = top + 4;
L->cachedslot = LUAU_INSN_C(insn);
VM_PROTECT(luaV_callTM(L, 3, -1));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
return pc;
}
else
{
// slow-path, may invoke Lua calls via __newindex metamethod
VM_PROTECT(luaV_settable(L, rb, kv, ra));
return pc;
}
}
}
const Instruction* executeNEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
Proto* pv = cl->l.p->p[LUAU_INSN_D(insn)];
LUAU_ASSERT(unsigned(LUAU_INSN_D(insn)) < unsigned(cl->l.p->sizep));
VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM
// note: we save closure to stack early in case the code below wants to capture it by value
Closure* ncl = luaF_newLclosure(L, pv->nups, cl->env, pv);
setclvalue(L, ra, ncl);
for (int ui = 0; ui < pv->nups; ++ui)
{
Instruction uinsn = *pc++;
LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
switch (LUAU_INSN_A(uinsn))
{
case LCT_VAL:
setobj(L, &ncl->l.uprefs[ui], VM_REG(LUAU_INSN_B(uinsn)));
break;
case LCT_REF:
setupvalue(L, &ncl->l.uprefs[ui], luaF_findupval(L, VM_REG(LUAU_INSN_B(uinsn))));
break;
case LCT_UPVAL:
setobj(L, &ncl->l.uprefs[ui], VM_UV(LUAU_INSN_B(uinsn)));
break;
default:
LUAU_ASSERT(!"Unknown upvalue capture type");
LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
}
}
VM_PROTECT(luaC_checkGC(L));
return pc;
}
const Instruction* executeNAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
uint32_t aux = *pc++;
TValue* kv = VM_KV(aux);
LUAU_ASSERT(ttisstring(kv));
if (ttistable(rb))
{
Table* h = hvalue(rb);
// note: we can't use nodemask8 here because we need to query the main position of the table, and 8-bit nodemask8 only works
// for predictive lookups
LuaNode* n = &h->node[tsvalue(kv)->hash & (sizenode(h) - 1)];
const TValue* mt = 0;
const LuaNode* mtn = 0;
// fast-path: key is in the table in expected slot
if (ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)))
{
// note: order of copies allows rb to alias ra+1 or ra
setobj2s(L, ra + 1, rb);
setobj2s(L, ra, gval(n));
}
// fast-path: key is absent from the base, table has an __index table, and it has the result in the expected slot
else if (gnext(n) == 0 && (mt = fasttm(L, hvalue(rb)->metatable, TM_INDEX)) && ttistable(mt) &&
(mtn = &hvalue(mt)->node[LUAU_INSN_C(insn) & hvalue(mt)->nodemask8]) && ttisstring(gkey(mtn)) && tsvalue(gkey(mtn)) == tsvalue(kv) &&
!ttisnil(gval(mtn)))
{
// note: order of copies allows rb to alias ra+1 or ra
setobj2s(L, ra + 1, rb);
setobj2s(L, ra, gval(mtn));
}
else
{
// slow-path: handles full table lookup
setobj2s(L, ra + 1, rb);
L->cachedslot = LUAU_INSN_C(insn);
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
// recompute ra since stack might have been reallocated
ra = VM_REG(LUAU_INSN_A(insn));
if (ttisnil(ra))
luaG_methoderror(L, ra + 1, tsvalue(kv));
}
}
else
{
Table* mt = ttisuserdata(rb) ? uvalue(rb)->metatable : L->global->mt[ttype(rb)];
const TValue* tmi = 0;
// fast-path: metatable with __namecall
if (const TValue* fn = fasttm(L, mt, TM_NAMECALL))
{
// note: order of copies allows rb to alias ra+1 or ra
setobj2s(L, ra + 1, rb);
setobj2s(L, ra, fn);
L->namecall = tsvalue(kv);
}
else if ((tmi = fasttm(L, mt, TM_INDEX)) && ttistable(tmi))
{
Table* h = hvalue(tmi);
int slot = LUAU_INSN_C(insn) & h->nodemask8;
LuaNode* n = &h->node[slot];
// fast-path: metatable with __index that has method in expected slot
if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
{
// note: order of copies allows rb to alias ra+1 or ra
setobj2s(L, ra + 1, rb);
setobj2s(L, ra, gval(n));
}
else
{
// slow-path: handles slot mismatch
setobj2s(L, ra + 1, rb);
L->cachedslot = slot;
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
// recompute ra since stack might have been reallocated
ra = VM_REG(LUAU_INSN_A(insn));
if (ttisnil(ra))
luaG_methoderror(L, ra + 1, tsvalue(kv));
}
}
else
{
// slow-path: handles non-table __index
setobj2s(L, ra + 1, rb);
VM_PROTECT(luaV_gettable(L, rb, kv, ra));
// recompute ra since stack might have been reallocated
ra = VM_REG(LUAU_INSN_A(insn));
if (ttisnil(ra))
luaG_methoderror(L, ra + 1, tsvalue(kv));
}
}
// intentional fallthrough to CALL
LUAU_ASSERT(LUAU_INSN_OP(*pc) == LOP_CALL);
return pc;
}
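// Main-position sketch: the predicted-slot mask (nodemask8) is only 8 bits wide,
// so the NAMECALL fast path above hashes into the full node array instead:
static unsigned mainPositionModel(unsigned hash, int lsizenode)
{
    return hash & ((1u << lsizenode) - 1); // sizenode(h) - 1, a full-width mask
}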
const Instruction* executeSETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = &base[LUAU_INSN_B(insn)]; // note: this can point to L->top if c == LUA_MULTRET making VM_REG unsafe to use
int c = LUAU_INSN_C(insn) - 1;
uint32_t index = *pc++;
if (c == LUA_MULTRET)
{
c = int(L->top - rb);
L->top = L->ci->top;
}
Table* h = hvalue(ra);
// TODO: we really don't need this anymore
if (!ttistable(ra))
return NULL; // temporary workaround to weaken a rather powerful exploitation primitive in case of a MITM attack on bytecode
int last = index + c - 1;
if (last > h->sizearray)
{
VM_PROTECT_PC(); // luaH_resizearray may fail due to OOM
luaH_resizearray(L, h, last);
}
TValue* array = h->array;
for (int i = 0; i < c; ++i)
setobj2t(L, &array[index + i - 1], rb + i);
luaC_barrierfast(L, h);
return pc;
}
const Instruction* executeFORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
if (ttisfunction(ra))
{
// will be called during FORGLOOP
}
else
{
Table* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? uvalue(ra)->metatable : cast_to(Table*, NULL);
if (const TValue* fn = fasttm(L, mt, TM_ITER))
{
setobj2s(L, ra + 1, ra);
setobj2s(L, ra, fn);
L->top = ra + 2; // func + self arg
LUAU_ASSERT(L->top <= L->stack_last);
VM_PROTECT(luaD_call(L, ra, 3));
L->top = L->ci->top;
// recompute ra since stack might have been reallocated
ra = VM_REG(LUAU_INSN_A(insn));
// protect against __iter returning nil, since nil is used as a marker for builtin iteration in FORGLOOP
if (ttisnil(ra))
{
VM_PROTECT_PC(); // next call always errors
luaG_typeerror(L, ra, "call");
}
}
else if (fasttm(L, mt, TM_CALL))
{
// table or userdata with __call, will be called during FORGLOOP
// TODO: we might be able to stop supporting this depending on whether it's used in practice
}
else if (ttistable(ra))
{
// set up registers for builtin iteration
setobj2s(L, ra + 1, ra);
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)));
setnilvalue(ra);
}
else
{
VM_PROTECT_PC(); // next call always errors
luaG_typeerror(L, ra, "iterate over");
}
}
pc += LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
return pc;
}
const Instruction* executeGETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int b = LUAU_INSN_B(insn) - 1;
int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;
if (b == LUA_MULTRET)
{
VM_PROTECT(luaD_checkstack(L, n));
StkId ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack
for (int j = 0; j < n; j++)
setobj2s(L, ra + j, base - n + j);
L->top = ra + n;
return pc;
}
else
{
StkId ra = VM_REG(LUAU_INSN_A(insn));
for (int j = 0; j < b && j < n; j++)
setobj2s(L, ra + j, base - n + j);
for (int j = n; j < b; j++)
setnilvalue(ra + j);
return pc;
}
}
const Instruction* executeDUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
TValue* kv = VM_KV(LUAU_INSN_D(insn));
Closure* kcl = clvalue(kv);
VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM
// clone closure if the environment is not shared
// note: we save closure to stack early in case the code below wants to capture it by value
Closure* ncl = (kcl->env == cl->env) ? kcl : luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
setclvalue(L, ra, ncl);
// this loop does three things:
// - if the closure was created anew, it just fills it with upvalues
// - if the closure from the constant table is used, it fills it with upvalues so that it can be shared in the future
// - if the closure is reused, it checks if the reuse is safe via rawequal, and falls back to duplicating the closure
// normally this would use two separate loops, for reuse check and upvalue setup, but MSVC codegen goes crazy if you do that
for (int ui = 0; ui < kcl->nupvalues; ++ui)
{
Instruction uinsn = pc[ui];
LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
LUAU_ASSERT(LUAU_INSN_A(uinsn) == LCT_VAL || LUAU_INSN_A(uinsn) == LCT_UPVAL);
TValue* uv = (LUAU_INSN_A(uinsn) == LCT_VAL) ? VM_REG(LUAU_INSN_B(uinsn)) : VM_UV(LUAU_INSN_B(uinsn));
// check if the existing closure is safe to reuse
if (ncl == kcl && luaO_rawequalObj(&ncl->l.uprefs[ui], uv))
continue;
// lazily clone the closure and update the upvalues
if (ncl == kcl && kcl->preload == 0)
{
ncl = luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
setclvalue(L, ra, ncl);
ui = -1; // restart the loop to fill all upvalues
continue;
}
// this updates a newly created closure, or an existing closure created during preload, in which case we need a barrier
setobj(L, &ncl->l.uprefs[ui], uv);
luaC_barrier(L, ncl, uv);
}
// this is a noop if ncl is newly created or shared successfully, but it has to run after the closure is preloaded for the first time
ncl->preload = 0;
if (kcl != ncl)
VM_PROTECT(luaC_checkGC(L));
pc += kcl->nupvalues;
return pc;
}
const Instruction* executePREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
Instruction insn = *pc++;
int numparams = LUAU_INSN_A(insn);
// all fixed parameters are copied after the top so we need more stack space
VM_PROTECT(luaD_checkstack(L, cl->stacksize + numparams));
// the caller must have filled extra fixed arguments with nil
LUAU_ASSERT(cast_int(L->top - base) >= numparams);
// move fixed parameters to final position
StkId fixed = base; // first fixed argument
base = L->top; // final position of first argument
for (int i = 0; i < numparams; ++i)
{
setobj2s(L, base + i, fixed + i);
setnilvalue(fixed + i);
}
// rewire our stack frame to point to the new base
L->ci->base = base;
L->ci->top = base + cl->stacksize;
L->base = base;
L->top = L->ci->top;
return pc;
}
} // namespace CodeGen
} // namespace Luau
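Several of the execute* fallbacks above end with VM_PATCH_C(pc - 2, slot): the handler rewrites the C operand of the very instruction it is executing, so the next run of that call site predicts the correct node slot. A standalone model of the bit surgery, mirroring the VM_PATCH_C macro at the top of the file:

#include <cassert>
#include <cstdint>

// Replace the top byte (the C operand) of a 32-bit instruction word, keeping the rest.
uint32_t patchC(uint32_t insn, uint8_t slot)
{
    return (uint32_t(slot) << 24) | (insn & 0x00ffffffu);
}

int main()
{
    assert(patchC(0x12345678u, 0xAB) == 0xAB345678u);
}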

View File

@ -1,35 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "lobject.h"
namespace Luau
{
namespace CodeGen
{
bool forgLoopTableIter(lua_State* L, Table* h, int index, TValue* ra);
bool forgLoopNodeIter(lua_State* L, Table* h, int index, TValue* ra);
bool forgLoopNonTableFallback(lua_State* L, int insnA, int aux);
void forgPrepXnextFallback(lua_State* L, TValue* ra, int pc);
Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults);
void callEpilogC(lua_State* L, int nresults, int n);
Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults);
const Instruction* executeGETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeSETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeGETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeSETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeNEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeNAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeSETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeFORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeGETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeDUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executePREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
} // namespace CodeGen
} // namespace Luau

View File

@ -1,211 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/UnwindBuilder.h"
#include "NativeState.h"
#include "EmitCommonX64.h"
#include "lstate.h"
/* An overview of native environment stack setup that we are making in the entry function:
* Each line is 8 bytes, stack grows downwards.
*
* | ... previous frames ...
* | rdx home space | (unused)
* | rcx home space | (unused)
* | return address |
* | ... saved non-volatile registers ... <-- rsp + kStackSize + kLocalsSize
* | unused | for 16 byte alignment of the stack
* | sCode |
* | sClosure | <-- rsp + kStackSize
* | argument 6 | <-- rsp + 40
* | argument 5 | <-- rsp + 32
* | r9 home space |
* | r8 home space |
* | rdx home space |
* | rcx home space | <-- rsp points here
*
* Arguments to our entry function are saved to home space only on Windows.
* Space for arguments to the function we call is always reserved, but used only on Windows.
*
* Right now we use a frame pointer, but because of a fixed layout we can omit it in the future
*/
namespace Luau
{
namespace CodeGen
{
namespace X64
{
struct EntryLocations
{
Label start;
Label prologueEnd;
Label epilogueStart;
};
static EntryLocations buildEntryFunction(AssemblyBuilderX64& build, UnwindBuilder& unwind)
{
EntryLocations locations;
build.align(kFunctionAlignment, X64::AlignmentDataX64::Ud2);
locations.start = build.setLabel();
unwind.startFunction();
RegisterX64 rArg1 = (build.abi == ABIX64::Windows) ? rcx : rdi;
RegisterX64 rArg2 = (build.abi == ABIX64::Windows) ? rdx : rsi;
RegisterX64 rArg3 = (build.abi == ABIX64::Windows) ? r8 : rdx;
RegisterX64 rArg4 = (build.abi == ABIX64::Windows) ? r9 : rcx;
// Save common non-volatile registers
if (build.abi == ABIX64::SystemV)
{
// We need to use a standard rbp-based frame setup for debuggers to work with JIT code
build.push(rbp);
build.mov(rbp, rsp);
}
build.push(rbx);
build.push(r12);
build.push(r13);
build.push(r14);
build.push(r15);
if (build.abi == ABIX64::Windows)
{
// Save non-volatile registers that are specific to Windows x64 ABI
build.push(rdi);
build.push(rsi);
// On Windows, rbp is available as a general-purpose non-volatile register; we currently don't use it, but we need to push an even number
// of registers for stack alignment...
build.push(rbp);
// TODO: once we start using non-volatile SIMD registers on Windows, we will save those here
}
// Allocate stack space (reg home area + local data)
build.sub(rsp, kStackSize + kLocalsSize);
locations.prologueEnd = build.setLabel();
uint32_t prologueSize = build.getLabelOffset(locations.prologueEnd) - build.getLabelOffset(locations.start);
if (build.abi == ABIX64::SystemV)
unwind.prologueX64(prologueSize, kStackSize + kLocalsSize, /* setupFrame= */ true, {rbx, r12, r13, r14, r15});
else if (build.abi == ABIX64::Windows)
unwind.prologueX64(prologueSize, kStackSize + kLocalsSize, /* setupFrame= */ false, {rbx, r12, r13, r14, r15, rdi, rsi, rbp});
// Setup native execution environment
build.mov(rState, rArg1);
build.mov(rNativeContext, rArg4);
build.mov(rBase, qword[rState + offsetof(lua_State, base)]); // L->base
build.mov(rax, qword[rState + offsetof(lua_State, ci)]); // L->ci
build.mov(rax, qword[rax + offsetof(CallInfo, func)]); // L->ci->func
build.mov(rax, qword[rax + offsetof(TValue, value.gc)]); // L->ci->func->value.gc aka cl
build.mov(sClosure, rax);
build.mov(rConstants, qword[rArg2 + offsetof(Proto, k)]); // proto->k
build.mov(rax, qword[rArg2 + offsetof(Proto, code)]); // proto->code
build.mov(sCode, rax);
// Jump to the specified instruction; further control flow will be handled with custom ABI with register setup from EmitCommonX64.h
build.jmp(rArg3);
// Even though we jumped away, we will return here in the end
locations.epilogueStart = build.setLabel();
// Cleanup and exit
build.add(rsp, kStackSize + kLocalsSize);
if (build.abi == ABIX64::Windows)
{
build.pop(rbp);
build.pop(rsi);
build.pop(rdi);
}
build.pop(r15);
build.pop(r14);
build.pop(r13);
build.pop(r12);
build.pop(rbx);
if (build.abi == ABIX64::SystemV)
build.pop(rbp);
build.ret();
// Our entry function is special, it spans the whole remaining code area
unwind.finishFunction(build.getLabelOffset(locations.start), kFullBlockFuncton);
return locations;
}
bool initHeaderFunctions(NativeState& data)
{
AssemblyBuilderX64 build(/* logText= */ false);
UnwindBuilder& unwind = *data.unwindBuilder.get();
unwind.startInfo(UnwindBuilder::X64);
EntryLocations entryLocations = buildEntryFunction(build, unwind);
build.finalize();
unwind.finishInfo();
LUAU_ASSERT(build.data.empty());
uint8_t* codeStart = nullptr;
if (!data.codeAllocator.allocate(
build.data.data(), int(build.data.size()), build.code.data(), int(build.code.size()), data.gateData, data.gateDataSize, codeStart))
{
LUAU_ASSERT(!"Failed to create entry function");
return false;
}
// Set the offset at the beginning so that functions in new blocks will not overlap the locations
// specified by the unwind information of the entry function
unwind.setBeginOffset(build.getLabelOffset(entryLocations.prologueEnd));
data.context.gateEntry = codeStart + build.getLabelOffset(entryLocations.start);
data.context.gateExit = codeStart + build.getLabelOffset(entryLocations.epilogueStart);
return true;
}
void assembleHelpers(X64::AssemblyBuilderX64& build, ModuleHelpers& helpers)
{
if (build.logText)
build.logAppend("; exitContinueVm\n");
build.setLabel(helpers.exitContinueVm);
emitExit(build, /* continueInVm */ true);
if (build.logText)
build.logAppend("; exitNoContinueVm\n");
build.setLabel(helpers.exitNoContinueVm);
emitExit(build, /* continueInVm */ false);
if (build.logText)
build.logAppend("; continueCallInVm\n");
build.setLabel(helpers.continueCallInVm);
emitContinueCallInVm(build);
if (build.logText)
build.logAppend("; interrupt\n");
build.setLabel(helpers.interrupt);
emitInterrupt(build);
if (build.logText)
build.logAppend("; return\n");
build.setLabel(helpers.return_);
emitReturn(build, helpers);
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau
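The pushes and the single rsp adjustment in buildEntryFunction are sized so that the stack ends up 16-byte aligned on both ABIs. A standalone check of that arithmetic, using the constants defined in EmitCommonX64.h (shown later in this diff):

#include <cassert>

int main()
{
    const int kStackSize = 32 + 16;               // home area + call-argument space
    const int kSpillSlots = 4;
    const int kLocalsSize = 24 + 8 * kSpillSlots; // custom locals + spill slots

    // On entry the return address leaves rsp at 8 (mod 16); each push adds 8 more.
    int sysv = 8 + 6 * 8 + kStackSize + kLocalsSize; // pushes: rbp, rbx, r12-r15
    int win = 8 + 8 * 8 + kStackSize + kLocalsSize;  // pushes: rbx, r12-r15, rdi, rsi, rbp
    assert(sysv % 16 == 0 && win % 16 == 0);
}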

View File

@ -1,22 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
struct NativeState;
struct ModuleHelpers;
namespace X64
{
class AssemblyBuilderX64;
bool initHeaderFunctions(NativeState& data);
void assembleHelpers(AssemblyBuilderX64& build, ModuleHelpers& helpers);
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View File

@ -1,127 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitBuiltinsX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/Bytecode.h"
#include "Luau/IrCallWrapperX64.h"
#include "Luau/IrRegAllocX64.h"
#include "EmitCommonX64.h"
#include "NativeState.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
namespace X64
{
static void emitBuiltinMathFrexp(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg, int nresults)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::xmmword, luauRegValue(arg));
callWrap.addArgument(SizeX64::qword, sTemporarySlot);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, libm_frexp)]);
build.vmovsd(luauRegValue(ra), xmm0);
if (nresults > 1)
{
build.vcvtsi2sd(xmm0, xmm0, dword[sTemporarySlot + 0]);
build.vmovsd(luauRegValue(ra + 1), xmm0);
}
}
static void emitBuiltinMathModf(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg, int nresults)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::xmmword, luauRegValue(arg));
callWrap.addArgument(SizeX64::qword, sTemporarySlot);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, libm_modf)]);
build.vmovsd(xmm1, qword[sTemporarySlot + 0]);
build.vmovsd(luauRegValue(ra), xmm1);
if (nresults > 1)
build.vmovsd(luauRegValue(ra + 1), xmm0);
}
static void emitBuiltinMathSign(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg)
{
ScopedRegX64 tmp0{regs, SizeX64::xmmword};
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
ScopedRegX64 tmp3{regs, SizeX64::xmmword};
build.vmovsd(tmp0.reg, luauRegValue(arg));
build.vxorpd(tmp1.reg, tmp1.reg, tmp1.reg);
// Set tmp2 to -1 if arg < 0, else 0
build.vcmpltsd(tmp2.reg, tmp0.reg, tmp1.reg);
build.vmovsd(tmp3.reg, build.f64(-1));
build.vandpd(tmp2.reg, tmp2.reg, tmp3.reg);
// Set mask bit to 1 if 0 < arg, else 0
build.vcmpltsd(tmp0.reg, tmp1.reg, tmp0.reg);
// Result = (mask-bit == 1) ? 1.0 : tmp2
// If arg < 0 then tmp2 is -1 and mask-bit is 0, result is -1
// If arg == 0 then tmp2 is 0 and mask-bit is 0, result is 0
// If arg > 0 then tmp2 is 0 and mask-bit is 1, result is 1
build.vblendvpd(tmp0.reg, tmp2.reg, build.f64x2(1, 1), tmp0.reg);
build.vmovsd(luauRegValue(ra), tmp0.reg);
}
static void emitBuiltinType(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg)
{
ScopedRegX64 tmp0{regs, SizeX64::qword};
ScopedRegX64 tag{regs, SizeX64::dword};
build.mov(tag.reg, luauRegTag(arg));
build.mov(tmp0.reg, qword[rState + offsetof(lua_State, global)]);
build.mov(tmp0.reg, qword[tmp0.reg + qwordReg(tag.reg) * sizeof(TString*) + offsetof(global_State, ttname)]);
build.mov(luauRegValue(ra), tmp0.reg);
}
static void emitBuiltinTypeof(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(arg));
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaT_objtypenamestr)]);
build.mov(luauRegValue(ra), rax);
}
void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int ra, int arg, OperandX64 arg2, int nparams, int nresults)
{
switch (bfid)
{
case LBF_MATH_FREXP:
LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
return emitBuiltinMathFrexp(regs, build, ra, arg, nresults);
case LBF_MATH_MODF:
LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
return emitBuiltinMathModf(regs, build, ra, arg, nresults);
case LBF_MATH_SIGN:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathSign(regs, build, ra, arg);
case LBF_TYPE:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinType(regs, build, ra, arg);
case LBF_TYPEOF:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinTypeof(regs, build, ra, arg);
default:
LUAU_ASSERT(!"Missing x64 lowering");
}
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau
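emitBuiltinMathSign is a branchless select: one compare-and-mask produces -1 for negative inputs, and a blend overrides the result with 1 for positive ones. A scalar model of the same dataflow (SIMD compares yield all-zero masks for NaN, so NaN maps to 0 here too):

#include <cassert>

double signModel(double x)
{
    double neg = (x < 0.0) ? -1.0 : 0.0; // vcmpltsd + vandpd against -1
    bool pos = (0.0 < x);                // the second vcmpltsd produces the blend mask
    return pos ? 1.0 : neg;              // vblendvpd selects 1.0 or neg
}

int main()
{
    assert(signModel(-3.5) == -1.0 && signModel(0.0) == 0.0 && signModel(2.0) == 1.0);
}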

View File

@ -1,23 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
struct Label;
struct IrOp;
namespace X64
{
class AssemblyBuilderX64;
struct OperandX64;
struct IrRegAllocX64;
void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int ra, int arg, OperandX64 arg2, int nparams, int nresults);
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View File

@ -1,38 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Label.h"
namespace Luau
{
namespace CodeGen
{
constexpr unsigned kTValueSizeLog2 = 4;
constexpr unsigned kLuaNodeSizeLog2 = 5;
// TKey.tt and TKey.next are packed together in a bitfield
constexpr unsigned kOffsetOfTKeyTagNext = 12; // offsetof cannot be used on a bit field
constexpr unsigned kTKeyTagBits = 4;
constexpr unsigned kTKeyTagMask = (1 << kTKeyTagBits) - 1;
constexpr unsigned kOffsetOfInstructionC = 3;
// Leaf functions that are placed in every module to perform common instruction sequences
struct ModuleHelpers
{
// A64/X64
Label exitContinueVm;
Label exitNoContinueVm;
Label return_;
Label interrupt;
// X64
Label continueCallInVm;
// A64
Label reentry; // x0: closure
};
} // namespace CodeGen
} // namespace Luau
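Because TKey.tt and TKey.next share one 32-bit bitfield, offsetof cannot name the tag; generated code instead loads the whole word at byte offset 12 of the key and masks the low four bits (hence the "dirty upper bits" note in EmitCommonX64.h). A standalone model of that load-and-mask:

#include <cstdint>
#include <cstring>

constexpr unsigned kOffsetOfTKeyTagNext = 12;
constexpr unsigned kTKeyTagMask = (1 << 4) - 1;

// Read the 4-bit tag out of the packed tt/next word of a node key.
uint32_t tkeyTagModel(const void* key)
{
    uint32_t word;
    std::memcpy(&word, static_cast<const char*>(key) + kOffsetOfTKeyTagNext, sizeof(word));
    return word & kTKeyTagMask; // the upper 28 bits hold 'next' and are ignored
}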

View File

@ -1,59 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderA64.h"
#include "EmitCommon.h"
#include "lobject.h"
#include "ltm.h"
#include "lstate.h"
// AArch64 ABI reminder:
// Arguments: x0-x7, v0-v7
// Return: x0, v0 (or x8 that points to the address of the resulting structure)
// Volatile: x9-x15, v16-v31 ("caller-saved", any call may change them)
// Intra-procedure-call temporary: x16-x17 (any call or relocated jump may change them, as the linker may redirect branches through veneers to perform long jumps)
// Non-volatile: x19-x28, v8-v15 ("callee-saved", preserved after calls, only bottom half of SIMD registers is preserved!)
// Reserved: x18: reserved for platform use; x29: frame pointer (unless omitted); x30: link register; x31: stack pointer
namespace Luau
{
namespace CodeGen
{
struct NativeState;
namespace A64
{
// Data that is very common to access is placed in non-volatile registers:
// 1. Constant registers (only loaded during codegen entry)
constexpr RegisterA64 rState = x19; // lua_State* L
constexpr RegisterA64 rNativeContext = x20; // NativeContext* context
// 2. Frame registers (reloaded when call frame changes; rBase is also reloaded after all calls that may reallocate stack)
constexpr RegisterA64 rConstants = x21; // TValue* k
constexpr RegisterA64 rClosure = x22; // Closure* cl
constexpr RegisterA64 rCode = x23; // Instruction* code
constexpr RegisterA64 rBase = x24; // StkId base
// Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
// See CodeGenA64.cpp for layout
constexpr unsigned kStashSlots = 8; // stashed non-volatile registers
constexpr unsigned kSpillSlots = 22; // slots for spilling temporary registers
constexpr unsigned kTempSlots = 2; // 16 bytes of temporary space, such luxury!
constexpr unsigned kStackSize = (kStashSlots + kSpillSlots + kTempSlots) * 8;
constexpr AddressA64 sSpillArea = mem(sp, kStashSlots * 8);
constexpr AddressA64 sTemporary = mem(sp, (kStashSlots + kSpillSlots) * 8);
inline void emitUpdateBase(AssemblyBuilderA64& build)
{
build.ldr(rBase, mem(rState, offsetof(lua_State, base)));
}
} // namespace A64
} // namespace CodeGen
} // namespace Luau
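The stash/spill/temporary regions are laid out by plain arithmetic over the slot counts; a standalone check of the offsets behind sSpillArea and sTemporary above:

#include <cassert>

int main()
{
    const unsigned kStashSlots = 8, kSpillSlots = 22, kTempSlots = 2;
    const unsigned kStackSize = (kStashSlots + kSpillSlots + kTempSlots) * 8;

    assert(kStackSize == 256);                      // sp stays 16-byte aligned, as AAPCS64 requires
    assert(kStashSlots * 8 == 64);                  // sSpillArea begins right above the stash
    assert((kStashSlots + kSpillSlots) * 8 == 240); // sTemporary occupies the last two slots
}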

View File

@ -1,440 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitCommonX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/IrCallWrapperX64.h"
#include "Luau/IrData.h"
#include "Luau/IrRegAllocX64.h"
#include "NativeState.h"
#include "lgc.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
namespace X64
{
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, IrCondition cond, Label& label)
{
// Refresher on comi/ucomi EFLAGS:
// CF only: less
// ZF only: equal
// PF+CF+ZF: unordered (NaN)
if (rhs.cat == CategoryX64::reg)
{
build.vucomisd(rhs, lhs);
}
else
{
build.vmovsd(tmp, rhs);
build.vucomisd(tmp, lhs);
}
// Keep in mind that 'Not' conditions want 'true' for comparisons with NaN
// And because of NaN, integer check interchangeability like 'not less or equal' <-> 'greater' does not hold
switch (cond)
{
case IrCondition::NotLessEqual:
// (b < a) is the same as !(a <= b). jnae checks CF=1 which means < or NaN
build.jcc(ConditionX64::NotAboveEqual, label);
break;
case IrCondition::LessEqual:
// (b >= a) is the same as (a <= b). jae checks CF=0 which means >= and not NaN
build.jcc(ConditionX64::AboveEqual, label);
break;
case IrCondition::NotLess:
// (b <= a) is the same as !(a < b). jna checks CF=1 or ZF=1 which means <= or NaN
build.jcc(ConditionX64::NotAbove, label);
break;
case IrCondition::Less:
// (b > a) is the same as (a < b). ja checks CF=0 and ZF=0 which means > and not NaN
build.jcc(ConditionX64::Above, label);
break;
case IrCondition::NotEqual:
// ZF=0 or PF=1 means != or NaN
build.jcc(ConditionX64::NotZero, label);
build.jcc(ConditionX64::Parity, label);
break;
default:
LUAU_ASSERT(!"Unsupported condition");
}
}
void jumpOnAnyCmpFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, IrCondition cond, Label& label)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(ra));
callWrap.addArgument(SizeX64::qword, luauRegAddress(rb));
if (cond == IrCondition::NotLessEqual || cond == IrCondition::LessEqual)
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessequal)]);
else if (cond == IrCondition::NotLess || cond == IrCondition::Less)
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessthan)]);
else if (cond == IrCondition::NotEqual || cond == IrCondition::Equal)
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_equalval)]);
else
LUAU_ASSERT(!"Unsupported condition");
emitUpdateBase(build);
build.test(eax, eax);
build.jcc(cond == IrCondition::NotLessEqual || cond == IrCondition::NotLess || cond == IrCondition::NotEqual ? ConditionX64::Zero
: ConditionX64::NotZero,
label);
}
void getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, RegisterX64 table, int pcpos)
{
LUAU_ASSERT(tmp != node);
LUAU_ASSERT(table != node);
build.mov(node, qword[table + offsetof(Table, node)]);
// compute cached slot
build.mov(tmp, sCode);
build.movzx(dwordReg(tmp), byte[tmp + pcpos * sizeof(Instruction) + kOffsetOfInstructionC]);
build.and_(byteReg(tmp), byte[table + offsetof(Table, nodemask8)]);
// LuaNode* n = &h->node[slot];
build.shl(dwordReg(tmp), kLuaNodeSizeLog2);
build.add(node, tmp);
}
void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 numd, RegisterX64 numi, Label& label)
{
LUAU_ASSERT(numi.size == SizeX64::dword);
// Convert to integer, NaN is converted into 0x80000000
build.vcvttsd2si(numi, numd);
// Convert that integer back to double
build.vcvtsi2sd(tmp, numd, numi);
build.vucomisd(tmp, numd); // Sets ZF=1 if equal or NaN
// We don't need non-integer values
// But to skip the PF=1 check, we proceed with NaN because 0x80000000 index is out of bounds
build.jcc(ConditionX64::NotZero, label);
}
void callArithHelper(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, OperandX64 c, TMS tm)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(ra));
callWrap.addArgument(SizeX64::qword, luauRegAddress(rb));
callWrap.addArgument(SizeX64::qword, c);
callWrap.addArgument(SizeX64::dword, tm);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_doarith)]);
emitUpdateBase(build);
}
void callLengthHelper(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(ra));
callWrap.addArgument(SizeX64::qword, luauRegAddress(rb));
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_dolen)]);
emitUpdateBase(build);
}
void callPrepareForN(IrRegAllocX64& regs, AssemblyBuilderX64& build, int limit, int step, int init)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(limit));
callWrap.addArgument(SizeX64::qword, luauRegAddress(step));
callWrap.addArgument(SizeX64::qword, luauRegAddress(init));
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_prepareFORN)]);
}
void callGetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(rb));
callWrap.addArgument(SizeX64::qword, c);
callWrap.addArgument(SizeX64::qword, luauRegAddress(ra));
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_gettable)]);
emitUpdateBase(build);
}
void callSetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, luauRegAddress(rb));
callWrap.addArgument(SizeX64::qword, c);
callWrap.addArgument(SizeX64::qword, luauRegAddress(ra));
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_settable)]);
emitUpdateBase(build);
}
void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, Label& skip)
{
// iscollectable(ra)
build.cmp(luauRegTag(ra), LUA_TSTRING);
build.jcc(ConditionX64::Less, skip);
// isblack(obj2gco(o))
build.test(byte[object + offsetof(GCheader, marked)], bitmask(BLACKBIT));
build.jcc(ConditionX64::Zero, skip);
// iswhite(gcvalue(ra))
build.mov(tmp, luauRegValue(ra));
build.test(byte[tmp + offsetof(GCheader, marked)], bit2mask(WHITE0BIT, WHITE1BIT));
build.jcc(ConditionX64::Zero, skip);
}
void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra)
{
Label skip;
ScopedRegX64 tmp{regs, SizeX64::qword};
checkObjectBarrierConditions(build, tmp.reg, object, ra, skip);
{
ScopedSpills spillGuard(regs);
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, object, objectOp);
callWrap.addArgument(SizeX64::qword, tmp);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaC_barrierf)]);
}
build.setLabel(skip);
}
void callBarrierTableFast(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 table, IrOp tableOp)
{
Label skip;
// isblack(obj2gco(t))
build.test(byte[table + offsetof(GCheader, marked)], bitmask(BLACKBIT));
build.jcc(ConditionX64::Zero, skip);
{
ScopedSpills spillGuard(regs);
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::qword, table, tableOp);
callWrap.addArgument(SizeX64::qword, addr[table + offsetof(Table, gclist)]);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaC_barrierback)]);
}
build.setLabel(skip);
}
void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build)
{
Label skip;
{
ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::qword};
build.mov(tmp1.reg, qword[rState + offsetof(lua_State, global)]);
build.mov(tmp2.reg, qword[tmp1.reg + offsetof(global_State, totalbytes)]);
build.cmp(tmp2.reg, qword[tmp1.reg + offsetof(global_State, GCthreshold)]);
build.jcc(ConditionX64::Below, skip);
}
{
ScopedSpills spillGuard(regs);
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
callWrap.addArgument(SizeX64::dword, 1);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaC_step)]);
emitUpdateBase(build);
}
build.setLabel(skip);
}
void emitExit(AssemblyBuilderX64& build, bool continueInVm)
{
if (continueInVm)
build.mov(eax, 1);
else
build.xor_(eax, eax);
build.jmp(qword[rNativeContext + offsetof(NativeContext, gateExit)]);
}
void emitUpdateBase(AssemblyBuilderX64& build)
{
build.mov(rBase, qword[rState + offsetof(lua_State, base)]);
}
void emitInterrupt(AssemblyBuilderX64& build)
{
// rax = pcpos + 1
// rbx = return address in native code
// note: rbx is non-volatile so it will be saved across interrupt call automatically
RegisterX64 rArg1 = (build.abi == ABIX64::Windows) ? rcx : rdi;
RegisterX64 rArg2 = (build.abi == ABIX64::Windows) ? rdx : rsi;
Label skip;
// Update L->ci->savedpc; required in case interrupt errors
build.mov(rcx, sCode);
build.lea(rcx, addr[rcx + rax * sizeof(Instruction)]);
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(qword[rax + offsetof(CallInfo, savedpc)], rcx);
// Load interrupt handler; it may be nullptr in case the update raced with the check before we got here
build.mov(rax, qword[rState + offsetof(lua_State, global)]);
build.mov(rax, qword[rax + offsetof(global_State, cb.interrupt)]);
build.test(rax, rax);
build.jcc(ConditionX64::Zero, skip);
// Call interrupt
build.mov(rArg1, rState);
build.mov(dwordReg(rArg2), -1);
build.call(rax);
// Check if we need to exit
build.mov(al, byte[rState + offsetof(lua_State, status)]);
build.test(al, al);
build.jcc(ConditionX64::Zero, skip);
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.sub(qword[rax + offsetof(CallInfo, savedpc)], sizeof(Instruction));
emitExit(build, /* continueInVm */ false);
build.setLabel(skip);
emitUpdateBase(build); // interrupt may have reallocated stack
build.jmp(rbx);
}
void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int offset, int pcpos)
{
// fallback(L, instruction, base, k)
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::qword, rState);
RegisterX64 reg = callWrap.suggestNextArgumentRegister(SizeX64::qword);
build.mov(reg, sCode);
callWrap.addArgument(SizeX64::qword, addr[reg + pcpos * sizeof(Instruction)]);
callWrap.addArgument(SizeX64::qword, rBase);
callWrap.addArgument(SizeX64::qword, rConstants);
callWrap.call(qword[rNativeContext + offset]);
emitUpdateBase(build);
}
void emitContinueCallInVm(AssemblyBuilderX64& build)
{
RegisterX64 proto = rcx; // Sync with emitInstCall
build.mov(rdx, qword[proto + offsetof(Proto, code)]);
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);
build.mov(qword[rax + offsetof(CallInfo, savedpc)], rdx);
emitExit(build, /* continueInVm */ true);
}
void emitReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers)
{
// input: res in rdi, number of written values in ecx
RegisterX64 res = rdi;
RegisterX64 written = ecx;
RegisterX64 ci = r8;
RegisterX64 cip = r9;
RegisterX64 nresults = esi;
build.mov(ci, qword[rState + offsetof(lua_State, ci)]);
build.lea(cip, addr[ci - sizeof(CallInfo)]);
// nresults = ci->nresults
build.mov(nresults, dword[ci + offsetof(CallInfo, nresults)]);
Label skipResultCopy;
// Fill the rest of the expected results (nresults - written) with 'nil'
RegisterX64 counter = written;
build.sub(counter, nresults); // counter = -(nresults - written)
build.jcc(ConditionX64::GreaterEqual, skipResultCopy);
Label repeatNilLoop = build.setLabel();
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.inc(counter);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
build.setLabel(skipResultCopy);
build.mov(qword[rState + offsetof(lua_State, ci)], cip); // L->ci = cip
build.mov(rBase, qword[cip + offsetof(CallInfo, base)]); // sync base = L->base while we have a chance
build.mov(qword[rState + offsetof(lua_State, base)], rBase); // L->base = cip->base
Label skipFixedRetTop;
build.test(nresults, nresults); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, skipFixedRetTop); // jl jumps if SF != OF
build.mov(res, qword[cip + offsetof(CallInfo, top)]); // res = cip->top
build.setLabel(skipFixedRetTop);
build.mov(qword[rState + offsetof(lua_State, top)], res); // L->top = res
// Unlikely, but this might be the last return from VM
build.test(byte[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_RETURN);
build.jcc(ConditionX64::NotZero, helpers.exitNoContinueVm);
// Returning to the previous function is a bit tricky
// Registers alive: r9 (cip)
RegisterX64 proto = rcx;
RegisterX64 execdata = rbx;
// Change closure
build.mov(rax, qword[cip + offsetof(CallInfo, func)]);
build.mov(rax, qword[rax + offsetof(TValue, value.gc)]);
build.mov(sClosure, rax);
build.mov(proto, qword[rax + offsetof(Closure, l.p)]);
build.mov(execdata, qword[proto + offsetof(Proto, execdata)]);
build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);
build.jcc(ConditionX64::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data
// Change constants
build.mov(rConstants, qword[proto + offsetof(Proto, k)]);
// Change code
build.mov(rdx, qword[proto + offsetof(Proto, code)]);
build.mov(sCode, rdx);
build.mov(rax, qword[cip + offsetof(CallInfo, savedpc)]);
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
build.sub(rax, rdx);
// Get new instruction location and jump to it
build.mov(edx, dword[execdata + rax]);
build.add(rdx, qword[proto + offsetof(Proto, exectarget)]);
build.jmp(rdx);
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau
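The comments in jumpOnNumberCmp stress that the 'Not' conditions must come out true for unordered (NaN) inputs, which is exactly why !(a <= b) cannot be rewritten as a > b. A scalar illustration:

#include <cassert>
#include <cmath>

// 'NotLessEqual' must hold for unordered inputs; a plain 'greater' test would not.
bool notLessEqualModel(double a, double b)
{
    return !(a <= b); // true when a > b, and also when either operand is NaN
}

int main()
{
    double nan = std::nan("");
    assert(notLessEqualModel(2.0, 1.0)); // the ordinary 'greater' case
    assert(notLessEqualModel(nan, 1.0)); // unordered: !(a <= b) holds...
    assert(!(nan > 1.0));                // ...while 'a > b' is false, so the two are not interchangeable
}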

View File

@ -1,189 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderX64.h"
#include "EmitCommon.h"
#include "lobject.h"
#include "ltm.h"
// MS x64 ABI reminder:
// Arguments: rcx, rdx, r8, r9 ('overlapped' with xmm0-xmm3)
// Return: rax, xmm0
// Nonvolatile: r12-r15, rdi, rsi, rbx, rbp
// SIMD: only xmm6-xmm15 are non-volatile, all ymm upper parts are volatile
// AMD64 ABI reminder:
// Arguments: rdi, rsi, rdx, rcx, r8, r9 (xmm0-xmm7)
// Return: rax, rdx, xmm0, xmm1
// Nonvolatile: r12-r15, rbx, rbp
// SIMD: all volatile
namespace Luau
{
namespace CodeGen
{
enum class IrCondition : uint8_t;
struct NativeState;
struct IrOp;
namespace X64
{
struct IrRegAllocX64;
constexpr uint32_t kFunctionAlignment = 32;
// Data that is very common to access is placed in non-volatile registers
constexpr RegisterX64 rState = r15; // lua_State* L
constexpr RegisterX64 rBase = r14; // StkId base
constexpr RegisterX64 rNativeContext = r13; // NativeContext* context
constexpr RegisterX64 rConstants = r12; // TValue* k
// Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
// See CodeGenX64.cpp for layout
constexpr unsigned kStackSize = 32 + 16; // 4 home locations for registers, 16 bytes for additional function call arguments
constexpr unsigned kSpillSlots = 4; // locations for register allocator to spill data into
constexpr unsigned kLocalsSize = 24 + 8 * kSpillSlots; // 3 extra slots for our custom locals (also aligns the stack to 16 byte boundary)
constexpr OperandX64 sClosure = qword[rsp + kStackSize + 0]; // Closure* cl
constexpr OperandX64 sCode = qword[rsp + kStackSize + 8]; // Instruction* code
constexpr OperandX64 sTemporarySlot = addr[rsp + kStackSize + 16];
constexpr OperandX64 sSpillArea = addr[rsp + kStackSize + 24];
inline OperandX64 luauReg(int ri)
{
return xmmword[rBase + ri * sizeof(TValue)];
}
inline OperandX64 luauRegAddress(int ri)
{
return addr[rBase + ri * sizeof(TValue)];
}
inline OperandX64 luauRegValue(int ri)
{
return qword[rBase + ri * sizeof(TValue) + offsetof(TValue, value)];
}
inline OperandX64 luauRegTag(int ri)
{
return dword[rBase + ri * sizeof(TValue) + offsetof(TValue, tt)];
}
inline OperandX64 luauRegValueInt(int ri)
{
return dword[rBase + ri * sizeof(TValue) + offsetof(TValue, value)];
}
inline OperandX64 luauRegValueVector(int ri, int index)
{
return dword[rBase + ri * sizeof(TValue) + offsetof(TValue, value) + (sizeof(float) * index)];
}
inline OperandX64 luauConstant(int ki)
{
return xmmword[rConstants + ki * sizeof(TValue)];
}
inline OperandX64 luauConstantAddress(int ki)
{
return addr[rConstants + ki * sizeof(TValue)];
}
inline OperandX64 luauConstantTag(int ki)
{
return dword[rConstants + ki * sizeof(TValue) + offsetof(TValue, tt)];
}
inline OperandX64 luauConstantValue(int ki)
{
return qword[rConstants + ki * sizeof(TValue) + offsetof(TValue, value)];
}
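// Editorial example: assuming the usual 16-byte TValue with 'value' as its first
// member, luauRegValue(2) resolves to qword[rBase + 32] and luauConstantTag(1) to
// dword[rConstants + 16 + offsetof(TValue, tt)]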
inline OperandX64 luauNodeKeyValue(RegisterX64 node)
{
return qword[node + offsetof(LuaNode, key) + offsetof(TKey, value)];
}
// Note: tag has dirty upper bits
inline OperandX64 luauNodeKeyTag(RegisterX64 node)
{
return dword[node + offsetof(LuaNode, key) + kOffsetOfTKeyTagNext];
}
inline OperandX64 luauNodeValue(RegisterX64 node)
{
return xmmword[node + offsetof(LuaNode, val)];
}
inline void setLuauReg(AssemblyBuilderX64& build, RegisterX64 tmp, int ri, OperandX64 op)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
build.vmovups(tmp, op);
build.vmovups(luauReg(ri), tmp);
}
inline void jumpIfTagIs(AssemblyBuilderX64& build, int ri, lua_Type tag, Label& label)
{
build.cmp(luauRegTag(ri), tag);
build.jcc(ConditionX64::Equal, label);
}
inline void jumpIfTagIsNot(AssemblyBuilderX64& build, int ri, lua_Type tag, Label& label)
{
build.cmp(luauRegTag(ri), tag);
build.jcc(ConditionX64::NotEqual, label);
}
// Note: fallthrough label should be placed after this condition
inline void jumpIfFalsy(AssemblyBuilderX64& build, int ri, Label& target, Label& fallthrough)
{
jumpIfTagIs(build, ri, LUA_TNIL, target); // false if nil
jumpIfTagIsNot(build, ri, LUA_TBOOLEAN, fallthrough); // true if not nil or boolean
build.cmp(luauRegValueInt(ri), 0);
build.jcc(ConditionX64::Equal, target); // jump to falsy target if boolean value is 'false'
}
// Note: fallthrough label should be placed after this condition
inline void jumpIfTruthy(AssemblyBuilderX64& build, int ri, Label& target, Label& fallthrough)
{
jumpIfTagIs(build, ri, LUA_TNIL, fallthrough); // false if nil
jumpIfTagIsNot(build, ri, LUA_TBOOLEAN, target); // true if not nil or boolean
build.cmp(luauRegValueInt(ri), 0);
build.jcc(ConditionX64::NotEqual, target); // true if boolean value is 'true'
}
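// Editorial sketch: lowering 'if R0 then ... end' with these helpers (hypothetical labels):
//   Label thenBody, elseSkip;
//   jumpIfFalsy(build, 0, elseSkip, thenBody);
//   build.setLabel(thenBody); // fallthrough label placed right after, as required above
//   ...
//   build.setLabel(elseSkip);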
void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, IrCondition cond, Label& label);
void jumpOnAnyCmpFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, IrCondition cond, Label& label);
void getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, RegisterX64 table, int pcpos);
void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 numd, RegisterX64 numi, Label& label);
void callArithHelper(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, OperandX64 c, TMS tm);
void callLengthHelper(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb);
void callPrepareForN(IrRegAllocX64& regs, AssemblyBuilderX64& build, int limit, int step, int init);
void callGetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra);
void callSetTable(IrRegAllocX64& regs, AssemblyBuilderX64& build, int rb, OperandX64 c, int ra);
void checkObjectBarrierConditions(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 object, int ra, Label& skip);
void callBarrierObject(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 object, IrOp objectOp, int ra);
void callBarrierTableFast(IrRegAllocX64& regs, AssemblyBuilderX64& build, RegisterX64 table, IrOp tableOp);
void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build);
void emitExit(AssemblyBuilderX64& build, bool continueInVm);
void emitUpdateBase(AssemblyBuilderX64& build);
void emitInterrupt(AssemblyBuilderX64& build);
void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int offset, int pcpos);
void emitContinueCallInVm(AssemblyBuilderX64& build);
void emitReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers);
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View File

@ -1,430 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitInstructionX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/IrRegAllocX64.h"
#include "EmitCommonX64.h"
#include "NativeState.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
namespace X64
{
void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int nparams, int nresults)
{
// TODO: This should use IrCallWrapperX64
RegisterX64 rArg1 = (build.abi == ABIX64::Windows) ? rcx : rdi;
RegisterX64 rArg2 = (build.abi == ABIX64::Windows) ? rdx : rsi;
RegisterX64 rArg3 = (build.abi == ABIX64::Windows) ? r8 : rdx;
RegisterX64 rArg4 = (build.abi == ABIX64::Windows) ? r9 : rcx;
build.mov(rArg1, rState);
build.lea(rArg2, luauRegAddress(ra));
if (nparams == LUA_MULTRET)
build.mov(rArg3, qword[rState + offsetof(lua_State, top)]);
else
build.lea(rArg3, luauRegAddress(ra + 1 + nparams));
build.mov(dwordReg(rArg4), nresults);
build.call(qword[rNativeContext + offsetof(NativeContext, callProlog)]);
RegisterX64 ccl = rax; // Returned from callProlog
emitUpdateBase(build);
Label cFuncCall;
build.test(byte[ccl + offsetof(Closure, isC)], 1);
build.jcc(ConditionX64::NotZero, cFuncCall);
{
RegisterX64 proto = rcx; // Sync with emitContinueCallInVm
RegisterX64 ci = rdx;
RegisterX64 argi = rsi;
RegisterX64 argend = rdi;
build.mov(proto, qword[ccl + offsetof(Closure, l.p)]);
// Switch current Closure
build.mov(sClosure, ccl); // Last use of 'ccl'
build.mov(ci, qword[rState + offsetof(lua_State, ci)]);
Label fillnil, exitfillnil;
// argi = L->top
build.mov(argi, qword[rState + offsetof(lua_State, top)]);
// argend = L->base + p->numparams
build.movzx(eax, byte[proto + offsetof(Proto, numparams)]);
build.shl(eax, kTValueSizeLog2);
build.lea(argend, addr[rBase + rax]);
// while (argi < argend) setnilvalue(argi++);
build.setLabel(fillnil);
build.cmp(argi, argend);
build.jcc(ConditionX64::NotBelow, exitfillnil);
build.mov(dword[argi + offsetof(TValue, tt)], LUA_TNIL);
build.add(argi, sizeof(TValue));
build.jmp(fillnil); // This loop rarely runs so it's not worth repeating cmp/jcc
build.setLabel(exitfillnil);
// Set L->top to ci->top as most functions expect (no vararg)
build.mov(rax, qword[ci + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
// But if it is vararg, update it to 'argi'
Label skipVararg;
build.test(byte[proto + offsetof(Proto, is_vararg)], 1);
build.jcc(ConditionX64::Zero, skipVararg);
build.mov(qword[rState + offsetof(lua_State, top)], argi);
build.setLabel(skipVararg);
// Get native function entry
build.mov(rax, qword[proto + offsetof(Proto, exectarget)]);
build.test(rax, rax);
build.jcc(ConditionX64::Zero, helpers.continueCallInVm);
// Mark call frame as native
build.mov(dword[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);
// Switch current constants
build.mov(rConstants, qword[proto + offsetof(Proto, k)]);
// Switch current code
build.mov(rdx, qword[proto + offsetof(Proto, code)]);
build.mov(sCode, rdx);
build.jmp(rax);
}
build.setLabel(cFuncCall);
{
// results = ccl->c.f(L);
build.mov(rArg1, rState);
build.call(qword[ccl + offsetof(Closure, c.f)]); // Last use of 'ccl'
RegisterX64 results = eax;
build.test(results, results); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, helpers.exitNoContinueVm); // jl jumps if SF != OF
// We have special handling for a small number of expected results below
if (nresults != 0 && nresults != 1)
{
build.mov(rArg1, rState);
build.mov(dwordReg(rArg2), nresults);
build.mov(dwordReg(rArg3), results);
build.call(qword[rNativeContext + offsetof(NativeContext, callEpilogC)]);
emitUpdateBase(build);
return;
}
RegisterX64 ci = rdx;
RegisterX64 cip = rcx;
RegisterX64 vali = rsi;
build.mov(ci, qword[rState + offsetof(lua_State, ci)]);
build.lea(cip, addr[ci - sizeof(CallInfo)]);
// L->base = cip->base
build.mov(rBase, qword[cip + offsetof(CallInfo, base)]);
build.mov(qword[rState + offsetof(lua_State, base)], rBase);
if (nresults == 1)
{
// Opportunistically copy the result we expected from (L->top - results)
build.mov(vali, qword[rState + offsetof(lua_State, top)]);
build.shl(results, kTValueSizeLog2);
build.sub(vali, qwordReg(results));
build.vmovups(xmm0, xmmword[vali]);
build.vmovups(luauReg(ra), xmm0);
Label skipnil;
// If there was no result, override the value with 'nil'
build.test(results, results);
build.jcc(ConditionX64::NotZero, skipnil);
build.mov(luauRegTag(ra), LUA_TNIL);
build.setLabel(skipnil);
}
// L->ci = cip
build.mov(qword[rState + offsetof(lua_State, ci)], cip);
// L->top = cip->top
build.mov(rax, qword[cip + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax);
}
}
void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int actualResults, bool functionVariadic)
{
RegisterX64 res = rdi;
RegisterX64 written = ecx;
if (functionVariadic)
{
build.mov(res, qword[rState + offsetof(lua_State, ci)]);
build.mov(res, qword[res + offsetof(CallInfo, func)]);
}
else if (actualResults != 1)
build.lea(res, addr[rBase - sizeof(TValue)]); // invariant: ci->func + 1 == ci->base for non-variadic frames
if (actualResults == 0)
{
build.xor_(written, written);
build.jmp(helpers.return_);
}
else if (actualResults == 1 && !functionVariadic)
{
// fast path: minimizes res adjustments
// note that we skipped res computation for this specific case above
build.vmovups(xmm0, luauReg(ra));
build.vmovups(xmmword[rBase - sizeof(TValue)], xmm0);
build.mov(res, rBase);
build.mov(written, 1);
build.jmp(helpers.return_);
}
else if (actualResults >= 1 && actualResults <= 3)
{
for (int r = 0; r < actualResults; ++r)
{
build.vmovups(xmm0, luauReg(ra + r));
build.vmovups(xmmword[res + r * sizeof(TValue)], xmm0);
}
build.add(res, actualResults * sizeof(TValue));
build.mov(written, actualResults);
build.jmp(helpers.return_);
}
else
{
RegisterX64 vali = rax;
RegisterX64 valend = rdx;
// vali = ra
build.lea(vali, luauRegAddress(ra));
// Copy as much as possible for MULTRET calls, and only as much as needed otherwise
if (actualResults == LUA_MULTRET)
build.mov(valend, qword[rState + offsetof(lua_State, top)]); // valend = L->top
else
build.lea(valend, luauRegAddress(ra + actualResults)); // valend = ra + actualResults
build.xor_(written, written);
Label repeatValueLoop, exitValueLoop;
if (actualResults == LUA_MULTRET)
{
build.cmp(vali, valend);
build.jcc(ConditionX64::NotBelow, exitValueLoop);
}
build.setLabel(repeatValueLoop);
build.vmovups(xmm0, xmmword[vali]);
build.vmovups(xmmword[res], xmm0);
build.add(vali, sizeof(TValue));
build.add(res, sizeof(TValue));
build.inc(written);
build.cmp(vali, valend);
build.jcc(ConditionX64::Below, repeatValueLoop);
build.setLabel(exitValueLoop);
build.jmp(helpers.return_);
}
}
void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, int count, uint32_t index)
{
// TODO: This should use IrCallWrapperX64
RegisterX64 rArg1 = (build.abi == ABIX64::Windows) ? rcx : rdi;
RegisterX64 rArg2 = (build.abi == ABIX64::Windows) ? rdx : rsi;
RegisterX64 rArg3 = (build.abi == ABIX64::Windows) ? r8 : rdx;
OperandX64 last = index + count - 1;
// Using non-volatile 'rbx' for dynamic 'count' value (for LUA_MULTRET) to skip later recomputation
// We also keep 'count' scaled by sizeof(TValue) here as it helps in the loop below
RegisterX64 cscaled = rbx;
if (count == LUA_MULTRET)
{
RegisterX64 tmp = rax;
// count = L->top - rb
build.mov(cscaled, qword[rState + offsetof(lua_State, top)]);
build.lea(tmp, luauRegAddress(rb));
build.sub(cscaled, tmp); // Using byte difference
// L->top = L->ci->top
build.mov(tmp, qword[rState + offsetof(lua_State, ci)]);
build.mov(tmp, qword[tmp + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], tmp);
// last = index + count - 1;
last = edx;
build.mov(last, dwordReg(cscaled));
build.shr(last, kTValueSizeLog2);
build.add(last, index - 1);
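// Editorial example: if L->top - rb covers 3 values (48 bytes), cscaled == 48, so
// last == (48 >> kTValueSizeLog2) + index - 1 == index + 2, matching index + count - 1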
}
Label skipResize;
RegisterX64 table = regs.takeReg(rax, kInvalidInstIdx);
build.mov(table, luauRegValue(ra));
// Resize if h->sizearray < last
build.cmp(dword[table + offsetof(Table, sizearray)], last);
build.jcc(ConditionX64::NotBelow, skipResize);
// Argument setup reordered to avoid conflicts
LUAU_ASSERT(rArg3 != table);
build.mov(dwordReg(rArg3), last);
build.mov(rArg2, table);
build.mov(rArg1, rState);
build.call(qword[rNativeContext + offsetof(NativeContext, luaH_resizearray)]);
build.mov(table, luauRegValue(ra)); // Reload clobbered register value
build.setLabel(skipResize);
RegisterX64 arrayDst = rdx;
RegisterX64 offset = rcx;
build.mov(arrayDst, qword[table + offsetof(Table, array)]);
const int kUnrollSetListLimit = 4;
if (count != LUA_MULTRET && count <= kUnrollSetListLimit)
{
for (int i = 0; i < count; ++i)
{
// setobj2t(L, &array[index + i - 1], rb + i);
build.vmovups(xmm0, luauRegValue(rb + i));
build.vmovups(xmmword[arrayDst + (index + i - 1) * sizeof(TValue)], xmm0);
}
}
else
{
LUAU_ASSERT(count != 0);
build.xor_(offset, offset);
if (index != 1)
build.add(arrayDst, (index - 1) * sizeof(TValue));
Label repeatLoop, endLoop;
OperandX64 limit = count == LUA_MULTRET ? cscaled : OperandX64(count * sizeof(TValue));
// If count is static, we will always do at least one iteration
if (count == LUA_MULTRET)
{
build.cmp(offset, limit);
build.jcc(ConditionX64::NotBelow, endLoop);
}
build.setLabel(repeatLoop);
// setobj2t(L, &array[index + i - 1], rb + i);
build.vmovups(xmm0, xmmword[offset + rBase + rb * sizeof(TValue)]); // luauReg(rb) unwrapped to add offset
build.vmovups(xmmword[offset + arrayDst], xmm0);
build.add(offset, sizeof(TValue));
build.cmp(offset, limit);
build.jcc(ConditionX64::Below, repeatLoop);
build.setLabel(endLoop);
}
callBarrierTableFast(regs, build, table, {});
}
void emitInstForGLoop(AssemblyBuilderX64& build, int ra, int aux, Label& loopRepeat)
{
// ipairs-style traversal is handled in IR
LUAU_ASSERT(aux >= 0);
// TODO: This should use IrCallWrapperX64
RegisterX64 rArg1 = (build.abi == ABIX64::Windows) ? rcx : rdi;
RegisterX64 rArg2 = (build.abi == ABIX64::Windows) ? rdx : rsi;
RegisterX64 rArg3 = (build.abi == ABIX64::Windows) ? r8 : rdx;
RegisterX64 rArg4 = (build.abi == ABIX64::Windows) ? r9 : rcx;
// This is a fast-path for builtin table iteration; the tag check for 'ra' has to be performed before emitting this instruction
// Registers are chosen in this way to simplify fallback code for the node part
RegisterX64 table = rArg2;
RegisterX64 index = rArg3;
RegisterX64 elemPtr = rax;
build.mov(table, luauRegValue(ra + 1));
build.mov(index, luauRegValue(ra + 2));
// &array[index]
build.mov(dwordReg(elemPtr), dwordReg(index));
build.shl(dwordReg(elemPtr), kTValueSizeLog2);
build.add(elemPtr, qword[table + offsetof(Table, array)]);
// Clear extra variables since we might have more than two
for (int i = 2; i < aux; ++i)
build.mov(luauRegTag(ra + 3 + i), LUA_TNIL);
Label skipArray, skipArrayNil;
// First we advance index through the array portion
// while (unsigned(index) < unsigned(sizearray))
Label arrayLoop = build.setLabel();
build.cmp(dwordReg(index), dword[table + offsetof(Table, sizearray)]);
build.jcc(ConditionX64::NotBelow, skipArray);
// If element is nil, we increment the index; if it's not, we still need 'index + 1' inside
build.inc(index);
build.cmp(dword[elemPtr + offsetof(TValue, tt)], LUA_TNIL);
build.jcc(ConditionX64::Equal, skipArrayNil);
// setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
build.mov(luauRegValue(ra + 2), index);
// Tag should already be set to lightuserdata
// setnvalue(ra + 3, double(index + 1));
build.vcvtsi2sd(xmm0, xmm0, dwordReg(index));
build.vmovsd(luauRegValue(ra + 3), xmm0);
build.mov(luauRegTag(ra + 3), LUA_TNUMBER);
// setobj2s(L, ra + 4, e);
setLuauReg(build, xmm2, ra + 4, xmmword[elemPtr]);
build.jmp(loopRepeat);
build.setLabel(skipArrayNil);
// Index already incremented, advance to next array element
build.add(elemPtr, sizeof(TValue));
build.jmp(arrayLoop);
build.setLabel(skipArray);
// Call helper to assign next node value or to signal loop exit
build.mov(rArg1, rState);
// rArg2 and rArg3 are already set
build.lea(rArg4, luauRegAddress(ra));
build.call(qword[rNativeContext + offsetof(NativeContext, forgLoopNodeIter)]);
build.test(al, al);
build.jcc(ConditionX64::NotZero, loopRepeat);
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View File

@ -1,27 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
struct Label;
struct ModuleHelpers;
namespace X64
{
class AssemblyBuilderX64;
struct IrRegAllocX64;
void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int nparams, int nresults);
void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int actualResults, bool functionVariadic);
void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, int count, uint32_t index);
void emitInstForGLoop(AssemblyBuilderX64& build, int ra, int aux, Label& loopRepeat);
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View File

@ -1,904 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrAnalysis.h"
#include "Luau/DenseHash.h"
#include "Luau/IrData.h"
#include "Luau/IrUtils.h"
#include "lobject.h"
#include <bitset>
#include <stddef.h>
namespace Luau
{
namespace CodeGen
{
void updateUseCounts(IrFunction& function)
{
std::vector<IrBlock>& blocks = function.blocks;
std::vector<IrInst>& instructions = function.instructions;
for (IrBlock& block : blocks)
block.useCount = 0;
for (IrInst& inst : instructions)
inst.useCount = 0;
auto checkOp = [&](IrOp op) {
if (op.kind == IrOpKind::Inst)
{
IrInst& target = instructions[op.index];
LUAU_ASSERT(target.useCount < 0xffff);
target.useCount++;
}
else if (op.kind == IrOpKind::Block)
{
IrBlock& target = blocks[op.index];
LUAU_ASSERT(target.useCount < 0xffff);
target.useCount++;
}
};
for (IrInst& inst : instructions)
{
checkOp(inst.a);
checkOp(inst.b);
checkOp(inst.c);
checkOp(inst.d);
checkOp(inst.e);
checkOp(inst.f);
}
}
void updateLastUseLocations(IrFunction& function)
{
std::vector<IrInst>& instructions = function.instructions;
for (IrInst& inst : instructions)
inst.lastUse = 0;
for (size_t instIdx = 0; instIdx < instructions.size(); ++instIdx)
{
IrInst& inst = instructions[instIdx];
auto checkOp = [&](IrOp op) {
if (op.kind == IrOpKind::Inst)
instructions[op.index].lastUse = uint32_t(instIdx);
};
if (isPseudo(inst.cmd))
continue;
checkOp(inst.a);
checkOp(inst.b);
checkOp(inst.c);
checkOp(inst.d);
checkOp(inst.e);
checkOp(inst.f);
}
}
uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t startInstIdx)
{
LUAU_ASSERT(startInstIdx < function.instructions.size());
IrInst& targetInst = function.instructions[targetInstIdx];
for (uint32_t i = startInstIdx; i <= targetInst.lastUse; i++)
{
IrInst& inst = function.instructions[i];
if (isPseudo(inst.cmd))
continue;
if (inst.a.kind == IrOpKind::Inst && inst.a.index == targetInstIdx)
return i;
if (inst.b.kind == IrOpKind::Inst && inst.b.index == targetInstIdx)
return i;
if (inst.c.kind == IrOpKind::Inst && inst.c.index == targetInstIdx)
return i;
if (inst.d.kind == IrOpKind::Inst && inst.d.index == targetInstIdx)
return i;
if (inst.e.kind == IrOpKind::Inst && inst.e.index == targetInstIdx)
return i;
if (inst.f.kind == IrOpKind::Inst && inst.f.index == targetInstIdx)
return i;
}
// There must be a next use, since lastUse records a use at or after startInstIdx
LUAU_ASSERT(!"Failed to find next use");
return targetInst.lastUse;
}
std::pair<uint32_t, uint32_t> getLiveInOutValueCount(IrFunction& function, IrBlock& block)
{
uint32_t liveIns = 0;
uint32_t liveOuts = 0;
auto checkOp = [&](IrOp op) {
if (op.kind == IrOpKind::Inst)
{
if (op.index >= block.start && op.index <= block.finish)
liveOuts--;
else
liveIns++;
}
};
for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
{
IrInst& inst = function.instructions[instIdx];
if (isPseudo(inst.cmd))
continue;
liveOuts += inst.useCount;
checkOp(inst.a);
checkOp(inst.b);
checkOp(inst.c);
checkOp(inst.d);
checkOp(inst.e);
checkOp(inst.f);
}
return std::make_pair(liveIns, liveOuts);
}
uint32_t getLiveInValueCount(IrFunction& function, IrBlock& block)
{
return getLiveInOutValueCount(function, block).first;
}
uint32_t getLiveOutValueCount(IrFunction& function, IrBlock& block)
{
return getLiveInOutValueCount(function, block).second;
}
void requireVariadicSequence(RegisterSet& sourceRs, const RegisterSet& defRs, uint8_t varargStart)
{
if (!defRs.varargSeq)
{
// Peel away registers from variadic sequence that we define
while (defRs.regs.test(varargStart))
varargStart++;
LUAU_ASSERT(!sourceRs.varargSeq || sourceRs.varargStart == varargStart);
sourceRs.varargSeq = true;
sourceRs.varargStart = varargStart;
}
else
{
// Variadic use sequence might include registers before def sequence
for (int i = varargStart; i < defRs.varargStart; i++)
{
if (!defRs.regs.test(i))
sourceRs.regs.set(i);
}
}
}
static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock& block, RegisterSet& defRs, std::bitset<256>& capturedRegs)
{
RegisterSet inRs;
auto def = [&](IrOp op, int offset = 0) {
defRs.regs.set(vmRegOp(op) + offset, true);
};
auto use = [&](IrOp op, int offset = 0) {
if (!defRs.regs.test(vmRegOp(op) + offset))
inRs.regs.set(vmRegOp(op) + offset, true);
};
auto maybeDef = [&](IrOp op) {
if (op.kind == IrOpKind::VmReg)
defRs.regs.set(vmRegOp(op), true);
};
auto maybeUse = [&](IrOp op) {
if (op.kind == IrOpKind::VmReg)
{
if (!defRs.regs.test(vmRegOp(op)))
inRs.regs.set(vmRegOp(op), true);
}
};
auto defVarargs = [&](uint8_t varargStart) {
defRs.varargSeq = true;
defRs.varargStart = varargStart;
};
auto useVarargs = [&](uint8_t varargStart) {
requireVariadicSequence(inRs, defRs, varargStart);
// Variadic sequence has been consumed
defRs.varargSeq = false;
defRs.varargStart = 0;
};
auto defRange = [&](int start, int count) {
if (count == -1)
{
defVarargs(start);
}
else
{
for (int i = start; i < start + count; i++)
defRs.regs.set(i, true);
}
};
auto useRange = [&](int start, int count) {
if (count == -1)
{
useVarargs(start);
}
else
{
for (int i = start; i < start + count; i++)
{
if (!defRs.regs.test(i))
inRs.regs.set(i, true);
}
}
};
for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
{
const IrInst& inst = function.instructions[instIdx];
// For correct analysis, all instruction uses must be handled before handling the definitions
switch (inst.cmd)
{
case IrCmd::LOAD_TAG:
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
maybeUse(inst.a); // Argument can also be a VmConst
break;
case IrCmd::STORE_TAG:
case IrCmd::STORE_POINTER:
case IrCmd::STORE_DOUBLE:
case IrCmd::STORE_INT:
case IrCmd::STORE_VECTOR:
case IrCmd::STORE_TVALUE:
maybeDef(inst.a); // Argument can also be a pointer value
break;
case IrCmd::JUMP_IF_TRUTHY:
case IrCmd::JUMP_IF_FALSY:
use(inst.a);
break;
case IrCmd::JUMP_CMP_ANY:
use(inst.a);
use(inst.b);
break;
// A <- B, C
case IrCmd::DO_ARITH:
case IrCmd::GET_TABLE:
use(inst.b);
maybeUse(inst.c); // Argument can also be a VmConst
def(inst.a);
break;
case IrCmd::SET_TABLE:
use(inst.a);
use(inst.b);
maybeUse(inst.c); // Argument can also be a VmConst
break;
// A <- B
case IrCmd::DO_LEN:
use(inst.b);
def(inst.a);
break;
case IrCmd::GET_IMPORT:
def(inst.a);
break;
case IrCmd::CONCAT:
useRange(vmRegOp(inst.a), function.uintOp(inst.b));
defRange(vmRegOp(inst.a), function.uintOp(inst.b));
break;
case IrCmd::GET_UPVALUE:
def(inst.a);
break;
case IrCmd::SET_UPVALUE:
use(inst.b);
break;
case IrCmd::PREPARE_FORN:
use(inst.a);
use(inst.b);
use(inst.c);
def(inst.a);
def(inst.b);
def(inst.c);
break;
case IrCmd::INTERRUPT:
break;
case IrCmd::BARRIER_OBJ:
case IrCmd::BARRIER_TABLE_FORWARD:
use(inst.b);
break;
case IrCmd::CLOSE_UPVALS:
// Closing an upvalue should be counted as a register use (it copies the fresh register value)
// But we lack the required information about the specific set of registers that are affected
// Because we don't plan to optimize captured registers atm, we skip full dataflow analysis for them right now
break;
case IrCmd::CAPTURE:
maybeUse(inst.a);
if (function.uintOp(inst.b) == 1)
capturedRegs.set(vmRegOp(inst.a), true);
break;
case IrCmd::SETLIST:
use(inst.b);
useRange(vmRegOp(inst.c), function.intOp(inst.d));
break;
case IrCmd::CALL:
use(inst.a);
useRange(vmRegOp(inst.a) + 1, function.intOp(inst.b));
defRange(vmRegOp(inst.a), function.intOp(inst.c));
break;
case IrCmd::RETURN:
useRange(vmRegOp(inst.a), function.intOp(inst.b));
break;
// TODO: FASTCALL is more restrictive than INVOKE_FASTCALL; we should either determine the exact semantics, or rework it
case IrCmd::FASTCALL:
case IrCmd::INVOKE_FASTCALL:
if (int count = function.intOp(inst.e); count != -1)
{
if (count >= 3)
{
LUAU_ASSERT(inst.d.kind == IrOpKind::VmReg && vmRegOp(inst.d) == vmRegOp(inst.c) + 1);
useRange(vmRegOp(inst.c), count);
}
else
{
if (count >= 1)
use(inst.c);
if (count >= 2)
maybeUse(inst.d); // Argument can also be a VmConst
}
}
else
{
useVarargs(vmRegOp(inst.c));
}
// Multiple return sequences (count == -1) are defined by ADJUST_STACK_TO_REG
if (int count = function.intOp(inst.f); count != -1)
defRange(vmRegOp(inst.b), count);
break;
case IrCmd::FORGLOOP:
// The first register is not used by the instruction; we check that it's still 'nil' with CHECK_TAG
use(inst.a, 1);
use(inst.a, 2);
def(inst.a, 2);
defRange(vmRegOp(inst.a) + 3, function.intOp(inst.b));
break;
case IrCmd::FORGLOOP_FALLBACK:
useRange(vmRegOp(inst.a), 3);
def(inst.a, 2);
defRange(vmRegOp(inst.a) + 3, uint8_t(function.intOp(inst.b))); // ignore most significant bit
break;
case IrCmd::FORGPREP_XNEXT_FALLBACK:
use(inst.b);
break;
case IrCmd::FALLBACK_GETGLOBAL:
def(inst.b);
break;
case IrCmd::FALLBACK_SETGLOBAL:
use(inst.b);
break;
case IrCmd::FALLBACK_GETTABLEKS:
use(inst.c);
def(inst.b);
break;
case IrCmd::FALLBACK_SETTABLEKS:
use(inst.b);
use(inst.c);
break;
case IrCmd::FALLBACK_NAMECALL:
use(inst.c);
defRange(vmRegOp(inst.b), 2);
break;
case IrCmd::FALLBACK_PREPVARARGS:
// No effect on explicitly referenced registers
break;
case IrCmd::FALLBACK_GETVARARGS:
defRange(vmRegOp(inst.b), function.intOp(inst.c));
break;
case IrCmd::FALLBACK_NEWCLOSURE:
def(inst.b);
break;
case IrCmd::FALLBACK_DUPCLOSURE:
def(inst.b);
break;
case IrCmd::FALLBACK_FORGPREP:
use(inst.b);
defRange(vmRegOp(inst.b), 3);
break;
case IrCmd::ADJUST_STACK_TO_REG:
defRange(vmRegOp(inst.a), -1);
break;
case IrCmd::ADJUST_STACK_TO_TOP:
// While this can be considered to be a vararg consumer, it is already handled in fastcall instructions
break;
default:
// All instructions which reference registers have to be handled explicitly
LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg);
break;
}
}
return inRs;
}
// The algorithm used here is commonly known as backwards data-flow analysis.
// For each block, we track 'upward-exposed' (live-in) uses of registers - a use of a register that hasn't been defined in the block yet.
// We also track the set of registers that were defined in the block.
// When initial live-in sets of registers are computed, propagation of those uses upwards through predecessors is performed.
// If a predecessor doesn't define the register, we have to add it to its live-in set.
// Extending the set of live-in registers of a block requires re-checking of that block.
// Propagation runs iteratively, using a worklist of blocks to visit until a fixed point is reached.
// This algorithm can be easily extended to cover phi instructions, but we don't use those yet.
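// Editorial example of the fixed point: take B0 -> B1 -> B1 (a self-loop), where B0
// defines r0 and B1 uses r0 without defining it. The initial pass gives in(B1) = {r0}.
// Propagation then makes out(B0) |= in(B1) = {r0}; since B0 defines r0, in(B0) stays
// empty. The B1 self-edge adds out(B1) |= in(B1) without growing in(B1), so the
// worklist drains after one round.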
static void computeCfgLiveInOutRegSets(IrFunction& function)
{
CfgInfo& info = function.cfg;
// Clear existing data
// 'in' and 'captured' data is not cleared because it will be overwritten below
info.def.clear();
info.out.clear();
// Try to compute Luau VM register use-def info
info.in.resize(function.blocks.size());
info.def.resize(function.blocks.size());
info.out.resize(function.blocks.size());
// Captured registers are tracked for the whole function
// It should be possible to have a more precise analysis for them in the future
std::bitset<256> capturedRegs;
// First we compute live-in set of each block
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
const IrBlock& block = function.blocks[blockIdx];
if (block.kind == IrBlockKind::Dead)
continue;
info.in[blockIdx] = computeBlockLiveInRegSet(function, block, info.def[blockIdx], capturedRegs);
}
info.captured.regs = capturedRegs;
// With live-in sets ready, we can arrive at a fixed point for both in/out registers by requesting required registers from predecessors
std::vector<uint32_t> worklist;
std::vector<uint8_t> inWorklist;
inWorklist.resize(function.blocks.size(), false);
// We will have to visit each block at least once, so we add all of them to the worklist immediately
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
const IrBlock& block = function.blocks[blockIdx];
if (block.kind == IrBlockKind::Dead)
continue;
worklist.push_back(uint32_t(blockIdx));
inWorklist[blockIdx] = true;
}
while (!worklist.empty())
{
uint32_t blockIdx = worklist.back();
worklist.pop_back();
inWorklist[blockIdx] = false;
IrBlock& curr = function.blocks[blockIdx];
RegisterSet& inRs = info.in[blockIdx];
RegisterSet& defRs = info.def[blockIdx];
RegisterSet& outRs = info.out[blockIdx];
// Current block has to provide all registers in successor blocks
BlockIteratorWrapper successorsIt = successors(info, blockIdx);
for (uint32_t succIdx : successorsIt)
{
IrBlock& succ = function.blocks[succIdx];
// This is a step away from the usual definition of live range flow through CFG
// Exit from a regular block to a fallback block is not considered a block terminator
// This is because fallback blocks define an alternative implementation of the same operations
// This can cause the current block to define more registers than were actually available at fallback entry
if (curr.kind != IrBlockKind::Fallback && succ.kind == IrBlockKind::Fallback)
{
// If this is the only successor, this skip will not be valid
LUAU_ASSERT(successorsIt.size() != 1);
continue;
}
const RegisterSet& succRs = info.in[succIdx];
outRs.regs |= succRs.regs;
if (succRs.varargSeq)
{
LUAU_ASSERT(!outRs.varargSeq || outRs.varargStart == succRs.varargStart);
outRs.varargSeq = true;
outRs.varargStart = succRs.varargStart;
}
}
RegisterSet oldInRs = inRs;
// If current block didn't define a live-out, it has to be live-in
inRs.regs |= outRs.regs & ~defRs.regs;
if (outRs.varargSeq)
requireVariadicSequence(inRs, defRs, outRs.varargStart);
// If we have new live-ins, we have to notify all predecessors
// We don't allow changes to the start of the variadic sequence, so we skip checking that member
if (inRs.regs != oldInRs.regs || inRs.varargSeq != oldInRs.varargSeq)
{
for (uint32_t predIdx : predecessors(info, blockIdx))
{
if (!inWorklist[predIdx])
{
worklist.push_back(predIdx);
inWorklist[predIdx] = true;
}
}
}
}
// If Proto data is available, validate that entry block arguments match required registers
if (function.proto)
{
RegisterSet& entryIn = info.in[0];
LUAU_ASSERT(!entryIn.varargSeq);
for (size_t i = 0; i < entryIn.regs.size(); i++)
LUAU_ASSERT(!entryIn.regs.test(i) || i < function.proto->numparams);
}
}
static void computeCfgBlockEdges(IrFunction& function)
{
CfgInfo& info = function.cfg;
// Clear existing data
info.predecessorsOffsets.clear();
info.successorsOffsets.clear();
// Compute predecessors block edges
info.predecessorsOffsets.reserve(function.blocks.size());
info.successorsOffsets.reserve(function.blocks.size());
int edgeCount = 0;
for (const IrBlock& block : function.blocks)
{
info.predecessorsOffsets.push_back(edgeCount);
edgeCount += block.useCount;
}
info.predecessors.resize(edgeCount);
info.successors.resize(edgeCount);
edgeCount = 0;
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
const IrBlock& block = function.blocks[blockIdx];
info.successorsOffsets.push_back(edgeCount);
if (block.kind == IrBlockKind::Dead)
continue;
for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
{
const IrInst& inst = function.instructions[instIdx];
auto checkOp = [&](IrOp op) {
if (op.kind == IrOpKind::Block)
{
// We use a trick here, where the starting offset of the predecessor list serves as the position at which to write the next predecessor
// The values will be adjusted back in a separate loop later
info.predecessors[info.predecessorsOffsets[op.index]++] = uint32_t(blockIdx);
info.successors[edgeCount++] = op.index;
}
};
checkOp(inst.a);
checkOp(inst.b);
checkOp(inst.c);
checkOp(inst.d);
checkOp(inst.e);
checkOp(inst.f);
}
}
// Offsets into the predecessor list were used as iterators in the previous loop
// To adjust them back, block use count is subtracted (the predecessor count equals the block's use count)
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
const IrBlock& block = function.blocks[blockIdx];
info.predecessorsOffsets[blockIdx] -= block.useCount;
}
}
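// Editorial example of the offset trick above: predecessor counts [2, 1, 0] give
// starting offsets [0, 2, 3]; writing predecessors advances them to [2, 3, 3], and
// subtracting each block's use count restores [0, 2, 3]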
// Assign tree depth and pre- and post- DFS visit order of the tree/graph nodes
// Optionally, collect required node order into a vector
template<auto childIt>
void computeBlockOrdering(
IrFunction& function, std::vector<BlockOrdering>& ordering, std::vector<uint32_t>* preOrder, std::vector<uint32_t>* postOrder)
{
CfgInfo& info = function.cfg;
LUAU_ASSERT(info.idoms.size() == function.blocks.size());
ordering.clear();
ordering.resize(function.blocks.size());
// Get depth-first post-order using manual stack instead of recursion
struct StackItem
{
uint32_t blockIdx;
uint32_t itPos;
};
std::vector<StackItem> stack;
if (preOrder)
preOrder->reserve(function.blocks.size());
if (postOrder)
postOrder->reserve(function.blocks.size());
uint32_t nextPreOrder = 0;
uint32_t nextPostOrder = 0;
stack.push_back({0, 0});
ordering[0].visited = true;
ordering[0].preOrder = nextPreOrder++;
while (!stack.empty())
{
StackItem& item = stack.back();
BlockIteratorWrapper children = childIt(info, item.blockIdx);
if (item.itPos < children.size())
{
uint32_t childIdx = children[item.itPos++];
BlockOrdering& childOrdering = ordering[childIdx];
if (!childOrdering.visited)
{
childOrdering.visited = true;
childOrdering.depth = uint32_t(stack.size());
childOrdering.preOrder = nextPreOrder++;
if (preOrder)
preOrder->push_back(item.blockIdx);
stack.push_back({childIdx, 0});
}
}
else
{
ordering[item.blockIdx].postOrder = nextPostOrder++;
if (postOrder)
postOrder->push_back(item.blockIdx);
stack.pop_back();
}
}
}
// Dominance tree construction based on 'A Simple, Fast Dominance Algorithm' [Keith D. Cooper, et al]
// This solution has quadratic complexity in the worst case.
// It is possible to switch to the SEMI-NCA algorithm (also quadratic) mentioned in 'Linear-Time Algorithms for Dominators and Related Problems' [Loukas Georgiadis]
// Find block that is common between blocks 'a' and 'b' on the path towards the entry
static uint32_t findCommonDominator(const std::vector<uint32_t>& idoms, const std::vector<BlockOrdering>& data, uint32_t a, uint32_t b)
{
while (a != b)
{
while (data[a].postOrder < data[b].postOrder)
{
a = idoms[a];
LUAU_ASSERT(a != ~0u);
}
while (data[b].postOrder < data[a].postOrder)
{
b = idoms[b];
LUAU_ASSERT(b != ~0u);
}
}
return a;
}
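// Editorial example: in the diamond B0 -> {B1, B2} -> B3 with post-order numbers
// B3=0, B1=1, B2=2, B0=3 and idom(B1) == idom(B2) == B0, intersecting B1 and B2
// climbs B1 to B0 (1 < 2), then B2 to B0 (2 < 3), so the common dominator is B0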
void computeCfgImmediateDominators(IrFunction& function)
{
CfgInfo& info = function.cfg;
// Clear existing data
info.idoms.clear();
info.idoms.resize(function.blocks.size(), ~0u);
std::vector<BlockOrdering> ordering;
std::vector<uint32_t> blocksInPostOrder;
computeBlockOrdering<successors>(function, ordering, /* preOrder */ nullptr, &blocksInPostOrder);
// Entry node is temporarily marked to be an idom of itself to make algorithm work
info.idoms[0] = 0;
// Iteratively compute immediate dominators
bool updated = true;
while (updated)
{
updated = false;
// Go over blocks in reverse post-order of CFG
// '- 2' skips the root node which is last in post-order traversal
for (int i = int(blocksInPostOrder.size() - 2); i >= 0; i--)
{
uint32_t blockIdx = blocksInPostOrder[i];
uint32_t newIdom = ~0u;
for (uint32_t predIdx : predecessors(info, blockIdx))
{
if (uint32_t predIdom = info.idoms[predIdx]; predIdom != ~0u)
{
if (newIdom == ~0u)
newIdom = predIdx;
else
newIdom = findCommonDominator(info.idoms, ordering, newIdom, predIdx);
}
}
if (newIdom != info.idoms[blockIdx])
{
info.idoms[blockIdx] = newIdom;
// Run until a fixed point is reached
updated = true;
}
}
}
// Entry node doesn't have an immediate dominator
info.idoms[0] = ~0u;
}
void computeCfgDominanceTreeChildren(IrFunction& function)
{
CfgInfo& info = function.cfg;
// Clear existing data
info.domChildren.clear();
info.domChildrenOffsets.clear();
info.domChildrenOffsets.resize(function.blocks.size());
// First we need to know children count of each node in the dominance tree
// We use the offsets array to hold this data; counts will be readjusted to offsets later
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
uint32_t domParent = info.idoms[blockIdx];
if (domParent != ~0u)
info.domChildrenOffsets[domParent]++;
}
// Convert counts to offsets using a prefix sum
uint32_t total = 0;
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
uint32_t& offset = info.domChildrenOffsets[blockIdx];
uint32_t count = offset;
offset = total;
total += count;
}
info.domChildren.resize(total);
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
// We use a trick here, where the starting offset of the dominance children list serves as the position at which to write the next child
// The values will be adjusted back in a separate loop later
uint32_t domParent = info.idoms[blockIdx];
if (domParent != ~0u)
info.domChildren[info.domChildrenOffsets[domParent]++] = uint32_t(blockIdx);
}
// Offsets into the dominance children list were used as iterators in the previous loop
// That process basically moved the values in the array 1 step towards the start
// Here we move them one step towards the end and restore 0 for first offset
for (int blockIdx = int(function.blocks.size() - 1); blockIdx > 0; blockIdx--)
info.domChildrenOffsets[blockIdx] = info.domChildrenOffsets[blockIdx - 1];
info.domChildrenOffsets[0] = 0;
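// Editorial example: child counts [2, 0, 1] become offsets [0, 2, 2]; the write loop
// advances them to [2, 2, 3], and the shift-by-one above restores [0, 2, 2]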
computeBlockOrdering<domChildren>(function, info.domOrdering, /* preOrder */ nullptr, /* postOrder */ nullptr);
}
void computeCfgInfo(IrFunction& function)
{
computeCfgBlockEdges(function);
computeCfgImmediateDominators(function);
computeCfgDominanceTreeChildren(function);
computeCfgLiveInOutRegSets(function);
}
BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx)
{
LUAU_ASSERT(blockIdx < cfg.predecessorsOffsets.size());
uint32_t start = cfg.predecessorsOffsets[blockIdx];
uint32_t end = blockIdx + 1 < cfg.predecessorsOffsets.size() ? cfg.predecessorsOffsets[blockIdx + 1] : uint32_t(cfg.predecessors.size());
return BlockIteratorWrapper{cfg.predecessors.data() + start, cfg.predecessors.data() + end};
}
BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx)
{
LUAU_ASSERT(blockIdx < cfg.successorsOffsets.size());
uint32_t start = cfg.successorsOffsets[blockIdx];
uint32_t end = blockIdx + 1 < cfg.successorsOffsets.size() ? cfg.successorsOffsets[blockIdx + 1] : uint32_t(cfg.successors.size());
return BlockIteratorWrapper{cfg.successors.data() + start, cfg.successors.data() + end};
}
BlockIteratorWrapper domChildren(const CfgInfo& cfg, uint32_t blockIdx)
{
LUAU_ASSERT(blockIdx < cfg.domChildrenOffsets.size());
uint32_t start = cfg.domChildrenOffsets[blockIdx];
uint32_t end = blockIdx + 1 < cfg.domChildrenOffsets.size() ? cfg.domChildrenOffsets[blockIdx + 1] : uint32_t(cfg.domChildren.size());
return BlockIteratorWrapper{cfg.domChildren.data() + start, cfg.domChildren.data() + end};
}
} // namespace CodeGen
} // namespace Luau

View File

@ -1,643 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrBuilder.h"
#include "Luau/IrAnalysis.h"
#include "Luau/IrUtils.h"
#include "IrTranslation.h"
#include "lapi.h"
#include <string.h>
namespace Luau
{
namespace CodeGen
{
constexpr unsigned kNoAssociatedBlockIndex = ~0u;
IrBuilder::IrBuilder()
: constantMap({IrConstKind::Tag, ~0ull})
{
}
void IrBuilder::buildFunctionIr(Proto* proto)
{
function.proto = proto;
function.variadic = proto->is_vararg != 0;
// Rebuild original control flow blocks
rebuildBytecodeBasicBlocks(proto);
function.bcMapping.resize(proto->sizecode, {~0u, ~0u});
// Translate all instructions to IR inside blocks
for (int i = 0; i < proto->sizecode;)
{
const Instruction* pc = &proto->code[i];
LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));
int nexti = i + getOpLength(op);
LUAU_ASSERT(nexti <= proto->sizecode);
function.bcMapping[i] = {uint32_t(function.instructions.size()), ~0u};
// Begin new block at this instruction if it was in the bytecode or requested during translation
if (instIndexToBlock[i] != kNoAssociatedBlockIndex)
beginBlock(blockAtInst(i));
// We skip dead bytecode instructions when they appear after the block was already terminated
if (!inTerminatedBlock)
translateInst(op, pc, i);
i = nexti;
LUAU_ASSERT(i <= proto->sizecode);
// If we are going into a new block at the next instruction and it's a fallthrough, a jump has to be placed to mark block termination
if (i < int(instIndexToBlock.size()) && instIndexToBlock[i] != kNoAssociatedBlockIndex)
{
if (!isBlockTerminator(function.instructions.back().cmd))
inst(IrCmd::JUMP, blockAtInst(i));
}
}
// Now that all has been generated, compute use counts
updateUseCounts(function);
}
void IrBuilder::rebuildBytecodeBasicBlocks(Proto* proto)
{
instIndexToBlock.resize(proto->sizecode, kNoAssociatedBlockIndex);
// Mark jump targets
std::vector<uint8_t> jumpTargets(proto->sizecode, 0);
for (int i = 0; i < proto->sizecode;)
{
const Instruction* pc = &proto->code[i];
LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));
int target = getJumpTarget(*pc, uint32_t(i));
if (target >= 0 && !isFastCall(op))
jumpTargets[target] = true;
i += getOpLength(op);
LUAU_ASSERT(i <= proto->sizecode);
}
// Bytecode blocks are created at bytecode jump targets and the start of a function
jumpTargets[0] = true;
for (int i = 0; i < proto->sizecode; i++)
{
if (jumpTargets[i])
{
IrOp b = block(IrBlockKind::Bytecode);
instIndexToBlock[i] = b.index;
}
}
}
void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
{
switch (op)
{
case LOP_NOP:
break;
case LOP_LOADNIL:
translateInstLoadNil(*this, pc);
break;
case LOP_LOADB:
translateInstLoadB(*this, pc, i);
break;
case LOP_LOADN:
translateInstLoadN(*this, pc);
break;
case LOP_LOADK:
translateInstLoadK(*this, pc);
break;
case LOP_LOADKX:
translateInstLoadKX(*this, pc);
break;
case LOP_MOVE:
translateInstMove(*this, pc);
break;
case LOP_GETGLOBAL:
translateInstGetGlobal(*this, pc, i);
break;
case LOP_SETGLOBAL:
translateInstSetGlobal(*this, pc, i);
break;
case LOP_CALL:
inst(IrCmd::INTERRUPT, constUint(i));
inst(IrCmd::SET_SAVEDPC, constUint(i + 1));
inst(IrCmd::CALL, vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1), constInt(LUAU_INSN_C(*pc) - 1));
if (activeFastcallFallback)
{
inst(IrCmd::JUMP, fastcallFallbackReturn);
beginBlock(fastcallFallbackReturn);
activeFastcallFallback = false;
}
break;
case LOP_RETURN:
inst(IrCmd::INTERRUPT, constUint(i));
inst(IrCmd::RETURN, vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1));
break;
case LOP_GETTABLE:
translateInstGetTable(*this, pc, i);
break;
case LOP_SETTABLE:
translateInstSetTable(*this, pc, i);
break;
case LOP_GETTABLEKS:
translateInstGetTableKS(*this, pc, i);
break;
case LOP_SETTABLEKS:
translateInstSetTableKS(*this, pc, i);
break;
case LOP_GETTABLEN:
translateInstGetTableN(*this, pc, i);
break;
case LOP_SETTABLEN:
translateInstSetTableN(*this, pc, i);
break;
case LOP_JUMP:
translateInstJump(*this, pc, i);
break;
case LOP_JUMPBACK:
translateInstJumpBack(*this, pc, i);
break;
case LOP_JUMPIF:
translateInstJumpIf(*this, pc, i, /* not_ */ false);
break;
case LOP_JUMPIFNOT:
translateInstJumpIf(*this, pc, i, /* not_ */ true);
break;
case LOP_JUMPIFEQ:
translateInstJumpIfEq(*this, pc, i, /* not_ */ false);
break;
case LOP_JUMPIFLE:
translateInstJumpIfCond(*this, pc, i, IrCondition::LessEqual);
break;
case LOP_JUMPIFLT:
translateInstJumpIfCond(*this, pc, i, IrCondition::Less);
break;
case LOP_JUMPIFNOTEQ:
translateInstJumpIfEq(*this, pc, i, /* not_ */ true);
break;
case LOP_JUMPIFNOTLE:
translateInstJumpIfCond(*this, pc, i, IrCondition::NotLessEqual);
break;
case LOP_JUMPIFNOTLT:
translateInstJumpIfCond(*this, pc, i, IrCondition::NotLess);
break;
case LOP_JUMPX:
translateInstJumpX(*this, pc, i);
break;
case LOP_JUMPXEQKNIL:
translateInstJumpxEqNil(*this, pc, i);
break;
case LOP_JUMPXEQKB:
translateInstJumpxEqB(*this, pc, i);
break;
case LOP_JUMPXEQKN:
translateInstJumpxEqN(*this, pc, i);
break;
case LOP_JUMPXEQKS:
translateInstJumpxEqS(*this, pc, i);
break;
case LOP_ADD:
translateInstBinary(*this, pc, i, TM_ADD);
break;
case LOP_SUB:
translateInstBinary(*this, pc, i, TM_SUB);
break;
case LOP_MUL:
translateInstBinary(*this, pc, i, TM_MUL);
break;
case LOP_DIV:
translateInstBinary(*this, pc, i, TM_DIV);
break;
case LOP_MOD:
translateInstBinary(*this, pc, i, TM_MOD);
break;
case LOP_POW:
translateInstBinary(*this, pc, i, TM_POW);
break;
case LOP_ADDK:
translateInstBinaryK(*this, pc, i, TM_ADD);
break;
case LOP_SUBK:
translateInstBinaryK(*this, pc, i, TM_SUB);
break;
case LOP_MULK:
translateInstBinaryK(*this, pc, i, TM_MUL);
break;
case LOP_DIVK:
translateInstBinaryK(*this, pc, i, TM_DIV);
break;
case LOP_MODK:
translateInstBinaryK(*this, pc, i, TM_MOD);
break;
case LOP_POWK:
translateInstBinaryK(*this, pc, i, TM_POW);
break;
case LOP_NOT:
translateInstNot(*this, pc);
break;
case LOP_MINUS:
translateInstMinus(*this, pc, i);
break;
case LOP_LENGTH:
translateInstLength(*this, pc, i);
break;
case LOP_NEWTABLE:
translateInstNewTable(*this, pc, i);
break;
case LOP_DUPTABLE:
translateInstDupTable(*this, pc, i);
break;
case LOP_SETLIST:
inst(IrCmd::SETLIST, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), constInt(LUAU_INSN_C(*pc) - 1), constUint(pc[1]));
break;
case LOP_GETUPVAL:
translateInstGetUpval(*this, pc, i);
break;
case LOP_SETUPVAL:
translateInstSetUpval(*this, pc, i);
break;
case LOP_CLOSEUPVALS:
translateInstCloseUpvals(*this, pc);
break;
case LOP_FASTCALL:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, false, 0, {}, next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
break;
}
case LOP_FASTCALL1:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, true, 1, undef(), next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
break;
}
case LOP_FASTCALL2:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, true, 2, vmReg(pc[1]), next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
break;
}
case LOP_FASTCALL2K:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, true, 2, vmConst(pc[1]), next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
break;
}
case LOP_FORNPREP:
translateInstForNPrep(*this, pc, i);
break;
case LOP_FORNLOOP:
translateInstForNLoop(*this, pc, i);
break;
case LOP_FORGLOOP:
{
int aux = int(pc[1]);
// We have a translation for ipairs-style traversal; general loop iteration is still too complex
if (aux < 0)
{
translateInstForGLoopIpairs(*this, pc, i);
}
else
{
int ra = LUAU_INSN_A(*pc);
IrOp loopRepeat = blockAtInst(i + 1 + LUAU_INSN_D(*pc));
IrOp loopExit = blockAtInst(i + getOpLength(LOP_FORGLOOP));
IrOp fallback = block(IrBlockKind::Fallback);
inst(IrCmd::INTERRUPT, constUint(i));
loadAndCheckTag(vmReg(ra), LUA_TNIL, fallback);
inst(IrCmd::FORGLOOP, vmReg(ra), constInt(aux), loopRepeat, loopExit);
beginBlock(fallback);
inst(IrCmd::SET_SAVEDPC, constUint(i + 1));
inst(IrCmd::FORGLOOP_FALLBACK, vmReg(ra), constInt(aux), loopRepeat, loopExit);
beginBlock(loopExit);
}
break;
}
case LOP_FORGPREP_NEXT:
translateInstForGPrepNext(*this, pc, i);
break;
case LOP_FORGPREP_INEXT:
translateInstForGPrepInext(*this, pc, i);
break;
case LOP_AND:
translateInstAndX(*this, pc, i, vmReg(LUAU_INSN_C(*pc)));
break;
case LOP_ANDK:
translateInstAndX(*this, pc, i, vmConst(LUAU_INSN_C(*pc)));
break;
case LOP_OR:
translateInstOrX(*this, pc, i, vmReg(LUAU_INSN_C(*pc)));
break;
case LOP_ORK:
translateInstOrX(*this, pc, i, vmConst(LUAU_INSN_C(*pc)));
break;
case LOP_COVERAGE:
inst(IrCmd::COVERAGE, constUint(i));
break;
case LOP_GETIMPORT:
translateInstGetImport(*this, pc, i);
break;
case LOP_CONCAT:
translateInstConcat(*this, pc, i);
break;
case LOP_CAPTURE:
translateInstCapture(*this, pc, i);
break;
case LOP_NAMECALL:
translateInstNamecall(*this, pc, i);
break;
case LOP_PREPVARARGS:
inst(IrCmd::FALLBACK_PREPVARARGS, constUint(i), constInt(LUAU_INSN_A(*pc)));
break;
case LOP_GETVARARGS:
inst(IrCmd::FALLBACK_GETVARARGS, constUint(i), vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1));
break;
case LOP_NEWCLOSURE:
inst(IrCmd::FALLBACK_NEWCLOSURE, constUint(i), vmReg(LUAU_INSN_A(*pc)), constUint(LUAU_INSN_D(*pc)));
break;
case LOP_DUPCLOSURE:
inst(IrCmd::FALLBACK_DUPCLOSURE, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmConst(LUAU_INSN_D(*pc)));
break;
case LOP_FORGPREP:
{
IrOp loopStart = blockAtInst(i + 1 + LUAU_INSN_D(*pc));
inst(IrCmd::FALLBACK_FORGPREP, constUint(i), vmReg(LUAU_INSN_A(*pc)), loopStart);
break;
}
default:
LUAU_ASSERT(!"Unknown instruction");
}
}
bool IrBuilder::isInternalBlock(IrOp block)
{
IrBlock& target = function.blocks[block.index];
return target.kind == IrBlockKind::Internal;
}
void IrBuilder::beginBlock(IrOp block)
{
IrBlock& target = function.blocks[block.index];
activeBlockIdx = block.index;
LUAU_ASSERT(target.start == ~0u || target.start == uint32_t(function.instructions.size()));
target.start = uint32_t(function.instructions.size());
target.sortkey = target.start;
inTerminatedBlock = false;
}
void IrBuilder::loadAndCheckTag(IrOp loc, uint8_t tag, IrOp fallback)
{
inst(IrCmd::CHECK_TAG, inst(IrCmd::LOAD_TAG, loc), constTag(tag), fallback);
}
void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator)
{
DenseHashMap<uint32_t, uint32_t> instRedir{~0u};
auto redirect = [&instRedir](IrOp& op) {
if (op.kind == IrOpKind::Inst)
{
if (const uint32_t* newIndex = instRedir.find(op.index))
op.index = *newIndex;
else
LUAU_ASSERT(!"Values can only be used if they are defined in the same block");
}
};
if (removeCurrentTerminator && inTerminatedBlock)
{
IrBlock& active = function.blocks[activeBlockIdx];
IrInst& term = function.instructions[active.finish];
kill(function, term);
inTerminatedBlock = false;
}
for (uint32_t index = source.start; index <= source.finish; index++)
{
LUAU_ASSERT(index < function.instructions.size());
IrInst clone = function.instructions[index];
// Skip pseudo instructions to make clone more compact, but validate that they have no users
if (isPseudo(clone.cmd))
{
LUAU_ASSERT(clone.useCount == 0);
continue;
}
redirect(clone.a);
redirect(clone.b);
redirect(clone.c);
redirect(clone.d);
redirect(clone.e);
redirect(clone.f);
addUse(function, clone.a);
addUse(function, clone.b);
addUse(function, clone.c);
addUse(function, clone.d);
addUse(function, clone.e);
addUse(function, clone.f);
// Instructions that referenced the original will have to be adjusted to use the clone
instRedir[index] = uint32_t(function.instructions.size());
// Reconstruct the fresh clone
inst(clone.cmd, clone.a, clone.b, clone.c, clone.d, clone.e, clone.f);
}
}
IrOp IrBuilder::undef()
{
return {IrOpKind::Undef, 0};
}
IrOp IrBuilder::constInt(int value)
{
IrConst constant;
constant.kind = IrConstKind::Int;
constant.valueInt = value;
return constAny(constant, uint64_t(value));
}
IrOp IrBuilder::constUint(unsigned value)
{
IrConst constant;
constant.kind = IrConstKind::Uint;
constant.valueUint = value;
return constAny(constant, uint64_t(value));
}
IrOp IrBuilder::constDouble(double value)
{
IrConst constant;
constant.kind = IrConstKind::Double;
constant.valueDouble = value;
uint64_t asCommonKey;
static_assert(sizeof(asCommonKey) == sizeof(value), "Expecting double to be 64-bit");
memcpy(&asCommonKey, &value, sizeof(value));
return constAny(constant, asCommonKey);
}
IrOp IrBuilder::constTag(uint8_t value)
{
IrConst constant;
constant.kind = IrConstKind::Tag;
constant.valueTag = value;
return constAny(constant, uint64_t(value));
}
IrOp IrBuilder::constAny(IrConst constant, uint64_t asCommonKey)
{
ConstantKey key{constant.kind, asCommonKey};
if (uint32_t* cache = constantMap.find(key))
return {IrOpKind::Constant, *cache};
uint32_t index = uint32_t(function.constants.size());
function.constants.push_back(constant);
constantMap[key] = index;
return {IrOpKind::Constant, index};
}
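// Editorial note: interning keys on the raw bit pattern means numerically equal but
// bitwise distinct doubles (0.0 vs -0.0, or different NaN payloads) intern separately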
IrOp IrBuilder::cond(IrCondition cond)
{
return {IrOpKind::Condition, uint32_t(cond)};
}
IrOp IrBuilder::inst(IrCmd cmd)
{
return inst(cmd, {}, {}, {}, {}, {}, {});
}
IrOp IrBuilder::inst(IrCmd cmd, IrOp a)
{
return inst(cmd, a, {}, {}, {}, {}, {});
}
IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b)
{
return inst(cmd, a, b, {}, {}, {}, {});
}
IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c)
{
return inst(cmd, a, b, c, {}, {}, {});
}
IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d)
{
return inst(cmd, a, b, c, d, {}, {});
}
IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e)
{
return inst(cmd, a, b, c, d, e, {});
}
IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e, IrOp f)
{
uint32_t index = uint32_t(function.instructions.size());
function.instructions.push_back({cmd, a, b, c, d, e, f});
LUAU_ASSERT(!inTerminatedBlock);
if (isBlockTerminator(cmd))
{
function.blocks[activeBlockIdx].finish = index;
inTerminatedBlock = true;
}
return {IrOpKind::Inst, index};
}
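// Editorial sketch of typical builder usage (hypothetical blocks and register numbers):
//   IrOp success = build.block(IrBlockKind::Internal);
//   IrOp fail = build.block(IrBlockKind::Fallback);
//   build.inst(IrCmd::JUMP_IF_TRUTHY, build.vmReg(0), success, fail);
//   build.beginBlock(success); // the conditional jump above terminated the active block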
IrOp IrBuilder::block(IrBlockKind kind)
{
if (kind == IrBlockKind::Internal && activeFastcallFallback)
kind = IrBlockKind::Fallback;
uint32_t index = uint32_t(function.blocks.size());
function.blocks.push_back(IrBlock{kind});
return IrOp{IrOpKind::Block, index};
}
IrOp IrBuilder::blockAtInst(uint32_t index)
{
uint32_t blockIndex = instIndexToBlock[index];
if (blockIndex != kNoAssociatedBlockIndex)
return IrOp{IrOpKind::Block, blockIndex};
return block(IrBlockKind::Internal);
}
IrOp IrBuilder::vmReg(uint8_t index)
{
return {IrOpKind::VmReg, index};
}
IrOp IrBuilder::vmConst(uint32_t index)
{
return {IrOpKind::VmConst, index};
}
IrOp IrBuilder::vmUpvalue(uint8_t index)
{
return {IrOpKind::VmUpvalue, index};
}
} // namespace CodeGen
} // namespace Luau

View File

@ -1,431 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrCallWrapperX64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/IrRegAllocX64.h"
#include "EmitCommonX64.h"
namespace Luau
{
namespace CodeGen
{
namespace X64
{
static const std::array<OperandX64, 6> kWindowsGprOrder = {rcx, rdx, r8, r9, addr[rsp + 32], addr[rsp + 40]};
static const std::array<OperandX64, 6> kSystemvGprOrder = {rdi, rsi, rdx, rcx, r8, r9};
static const std::array<OperandX64, 4> kXmmOrder = {xmm0, xmm1, xmm2, xmm3}; // Common order for first 4 fp arguments on Windows/SystemV
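// Editorial note: on Windows the 5th and 6th GPR arguments live at [rsp + 32] and
// [rsp + 40] because the first 32 bytes above rsp are reserved as the callee's
// shadow (home) space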
static bool sameUnderlyingRegister(RegisterX64 a, RegisterX64 b)
{
SizeX64 underlyingSizeA = a.size == SizeX64::xmmword ? SizeX64::xmmword : SizeX64::qword;
SizeX64 underlyingSizeB = b.size == SizeX64::xmmword ? SizeX64::xmmword : SizeX64::qword;
return underlyingSizeA == underlyingSizeB && a.index == b.index;
}
IrCallWrapperX64::IrCallWrapperX64(IrRegAllocX64& regs, AssemblyBuilderX64& build, uint32_t instIdx)
: regs(regs)
, build(build)
, instIdx(instIdx)
, funcOp(noreg)
{
gprUses.fill(0);
xmmUses.fill(0);
}
void IrCallWrapperX64::addArgument(SizeX64 targetSize, OperandX64 source, IrOp sourceOp)
{
// Instruction operands rely on current instruction index for lifetime tracking
LUAU_ASSERT(instIdx != kInvalidInstIdx || sourceOp.kind == IrOpKind::None);
LUAU_ASSERT(argCount < kMaxCallArguments);
CallArgument& arg = args[argCount++];
arg = {targetSize, source, sourceOp};
arg.target = getNextArgumentTarget(targetSize);
if (build.abi == ABIX64::Windows)
{
// On Windows, gpr/xmm register positions move in sync
gprPos++;
xmmPos++;
}
else
{
if (targetSize == SizeX64::xmmword)
xmmPos++;
else
gprPos++;
}
}
void IrCallWrapperX64::addArgument(SizeX64 targetSize, ScopedRegX64& scopedReg)
{
addArgument(targetSize, scopedReg.release(), {});
}
void IrCallWrapperX64::call(const OperandX64& func)
{
funcOp = func;
countRegisterUses();
for (int i = 0; i < argCount; ++i)
{
CallArgument& arg = args[i];
if (arg.sourceOp.kind != IrOpKind::None)
{
if (IrInst* inst = regs.function.asInstOp(arg.sourceOp))
{
// Source registers are recorded separately from source operands in CallArgument
// If source is the last use of IrInst, clear the register from the operand
if (regs.isLastUseReg(*inst, instIdx))
inst->regX64 = noreg;
// If it's not the last use and register is volatile, register ownership is taken, which also spills the operand
else if (inst->regX64.size == SizeX64::xmmword || regs.shouldFreeGpr(inst->regX64))
regs.takeReg(inst->regX64, kInvalidInstIdx);
}
}
// Immediate values are assigned last since they don't interfere with anything, and their target registers can still be used temporarily until then
if (arg.source.cat == CategoryX64::imm)
{
arg.candidate = false;
}
// Arguments passed through stack can be handled immediately
else if (arg.target.cat == CategoryX64::mem)
{
if (arg.source.cat == CategoryX64::mem)
{
ScopedRegX64 tmp{regs, arg.target.memSize};
freeSourceRegisters(arg);
if (arg.source.memSize == SizeX64::none)
build.lea(tmp.reg, arg.source);
else
build.mov(tmp.reg, arg.source);
build.mov(arg.target, tmp.reg);
}
else
{
freeSourceRegisters(arg);
build.mov(arg.target, arg.source);
}
arg.candidate = false;
}
// Skip arguments that are already in their place
else if (arg.source.cat == CategoryX64::reg && sameUnderlyingRegister(arg.target.base, arg.source.base))
{
freeSourceRegisters(arg);
// If target is not used as source in other arguments, prevent register allocator from giving it out
if (getRegisterUses(arg.target.base) == 0)
regs.takeReg(arg.target.base, kInvalidInstIdx);
else // Otherwise, make sure we won't free it when last source use is completed
addRegisterUse(arg.target.base);
arg.candidate = false;
}
}
// Repeat until we run out of arguments to pass
while (true)
{
// Find target argument register that is not an active source
if (CallArgument* candidate = findNonInterferingArgument())
{
// This section is only for handling register targets
LUAU_ASSERT(candidate->target.cat == CategoryX64::reg);
freeSourceRegisters(*candidate);
LUAU_ASSERT(getRegisterUses(candidate->target.base) == 0);
regs.takeReg(candidate->target.base, kInvalidInstIdx);
moveToTarget(*candidate);
candidate->candidate = false;
}
// If all registers cross-interfere (rcx <- rdx, rdx <- rcx), one has to be renamed
else if (RegisterX64 conflict = findConflictingTarget(); conflict != noreg)
{
renameConflictingRegister(conflict);
}
else
{
for (int i = 0; i < argCount; ++i)
LUAU_ASSERT(!args[i].candidate);
break;
}
}
// Handle immediate arguments last
for (int i = 0; i < argCount; ++i)
{
CallArgument& arg = args[i];
if (arg.source.cat == CategoryX64::imm)
{
// There could be a conflict with the function source register, make this argument a candidate to find it
arg.candidate = true;
if (RegisterX64 conflict = findConflictingTarget(); conflict != noreg)
renameConflictingRegister(conflict);
if (arg.target.cat == CategoryX64::reg)
regs.takeReg(arg.target.base, kInvalidInstIdx);
moveToTarget(arg);
arg.candidate = false;
}
}
// Free registers used in the function call
removeRegisterUse(funcOp.base);
removeRegisterUse(funcOp.index);
// Just before the call is made, argument registers are all marked as free in register allocator
for (int i = 0; i < argCount; ++i)
{
CallArgument& arg = args[i];
if (arg.target.cat == CategoryX64::reg)
regs.freeReg(arg.target.base);
}
regs.preserveAndFreeInstValues();
regs.assertAllFree();
build.call(funcOp);
}
RegisterX64 IrCallWrapperX64::suggestNextArgumentRegister(SizeX64 size) const
{
OperandX64 target = getNextArgumentTarget(size);
return target.cat == CategoryX64::reg ? regs.takeReg(target.base, kInvalidInstIdx) : regs.allocReg(size, kInvalidInstIdx);
}
OperandX64 IrCallWrapperX64::getNextArgumentTarget(SizeX64 size) const
{
if (size == SizeX64::xmmword)
{
LUAU_ASSERT(size_t(xmmPos) < kXmmOrder.size());
return kXmmOrder[xmmPos];
}
const std::array<OperandX64, 6>& gprOrder = build.abi == ABIX64::Windows ? kWindowsGprOrder : kSystemvGprOrder;
LUAU_ASSERT(size_t(gprPos) < gprOrder.size());
OperandX64 target = gprOrder[gprPos];
// Keep requested argument size
if (target.cat == CategoryX64::reg)
target.base.size = size;
else if (target.cat == CategoryX64::mem)
target.memSize = size;
return target;
}
void IrCallWrapperX64::countRegisterUses()
{
for (int i = 0; i < argCount; ++i)
{
addRegisterUse(args[i].source.base);
addRegisterUse(args[i].source.index);
}
addRegisterUse(funcOp.base);
addRegisterUse(funcOp.index);
}
CallArgument* IrCallWrapperX64::findNonInterferingArgument()
{
for (int i = 0; i < argCount; ++i)
{
CallArgument& arg = args[i];
if (arg.candidate && !interferesWithActiveSources(arg, i) && !interferesWithOperand(funcOp, arg.target.base))
return &arg;
}
return nullptr;
}
bool IrCallWrapperX64::interferesWithOperand(const OperandX64& op, RegisterX64 reg) const
{
return sameUnderlyingRegister(op.base, reg) || sameUnderlyingRegister(op.index, reg);
}
bool IrCallWrapperX64::interferesWithActiveSources(const CallArgument& targetArg, int targetArgIndex) const
{
for (int i = 0; i < argCount; ++i)
{
const CallArgument& arg = args[i];
if (arg.candidate && i != targetArgIndex && interferesWithOperand(arg.source, targetArg.target.base))
return true;
}
return false;
}
bool IrCallWrapperX64::interferesWithActiveTarget(RegisterX64 sourceReg) const
{
for (int i = 0; i < argCount; ++i)
{
const CallArgument& arg = args[i];
if (arg.candidate && sameUnderlyingRegister(arg.target.base, sourceReg))
return true;
}
return false;
}
void IrCallWrapperX64::moveToTarget(CallArgument& arg)
{
if (arg.source.cat == CategoryX64::reg)
{
RegisterX64 source = arg.source.base;
if (source.size == SizeX64::xmmword)
build.vmovsd(arg.target, source, source);
else
build.mov(arg.target, source);
}
else if (arg.source.cat == CategoryX64::imm)
{
build.mov(arg.target, arg.source);
}
else
{
if (arg.source.memSize == SizeX64::none)
build.lea(arg.target, arg.source);
else if (arg.target.base.size == SizeX64::xmmword && arg.source.memSize == SizeX64::xmmword)
build.vmovups(arg.target, arg.source);
else if (arg.target.base.size == SizeX64::xmmword)
build.vmovsd(arg.target, arg.source);
else
build.mov(arg.target, arg.source);
}
}
void IrCallWrapperX64::freeSourceRegisters(CallArgument& arg)
{
removeRegisterUse(arg.source.base);
removeRegisterUse(arg.source.index);
}
void IrCallWrapperX64::renameRegister(RegisterX64& target, RegisterX64 reg, RegisterX64 replacement)
{
if (sameUnderlyingRegister(target, reg))
{
addRegisterUse(replacement);
removeRegisterUse(target);
target.index = replacement.index; // Only change index, size is preserved
}
}
void IrCallWrapperX64::renameSourceRegisters(RegisterX64 reg, RegisterX64 replacement)
{
for (int i = 0; i < argCount; ++i)
{
CallArgument& arg = args[i];
if (arg.candidate)
{
renameRegister(arg.source.base, reg, replacement);
renameRegister(arg.source.index, reg, replacement);
}
}
renameRegister(funcOp.base, reg, replacement);
renameRegister(funcOp.index, reg, replacement);
}
RegisterX64 IrCallWrapperX64::findConflictingTarget() const
{
for (int i = 0; i < argCount; ++i)
{
const CallArgument& arg = args[i];
if (arg.candidate)
{
if (interferesWithActiveTarget(arg.source.base))
return arg.source.base;
if (interferesWithActiveTarget(arg.source.index))
return arg.source.index;
}
}
if (interferesWithActiveTarget(funcOp.base))
return funcOp.base;
if (interferesWithActiveTarget(funcOp.index))
return funcOp.index;
return noreg;
}
void IrCallWrapperX64::renameConflictingRegister(RegisterX64 conflict)
{
// Get a fresh register
RegisterX64 freshReg = regs.allocReg(conflict.size, kInvalidInstIdx);
if (conflict.size == SizeX64::xmmword)
build.vmovsd(freshReg, conflict, conflict);
else
build.mov(freshReg, conflict);
renameSourceRegisters(conflict, freshReg);
}
int IrCallWrapperX64::getRegisterUses(RegisterX64 reg) const
{
if (reg.size == SizeX64::xmmword)
return xmmUses[reg.index];
if (reg.size != SizeX64::none)
return gprUses[reg.index];
return 0;
}
void IrCallWrapperX64::addRegisterUse(RegisterX64 reg)
{
if (reg.size == SizeX64::xmmword)
xmmUses[reg.index]++;
else if (reg.size != SizeX64::none)
gprUses[reg.index]++;
}
void IrCallWrapperX64::removeRegisterUse(RegisterX64 reg)
{
if (reg.size == SizeX64::xmmword)
{
LUAU_ASSERT(xmmUses[reg.index] != 0);
xmmUses[reg.index]--;
if (xmmUses[reg.index] == 0) // we don't use persistent xmm regs so no need to call shouldFreeRegister
regs.freeReg(reg);
}
else if (reg.size != SizeX64::none)
{
LUAU_ASSERT(gprUses[reg.index] != 0);
gprUses[reg.index]--;
if (gprUses[reg.index] == 0 && regs.shouldFreeGpr(reg))
regs.freeReg(reg);
}
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau
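// A hedged sketch of the intended usage of the wrapper above, assuming the
// internal EmitCommonX64.h / NativeState.h helpers from this same directory
// (rState, rNativeContext, luauRegAddress, NativeContext); the specific
// callee field is illustrative.
#include "Luau/IrCallWrapperX64.h"
#include "Luau/IrRegAllocX64.h"
#include "EmitCommonX64.h"
#include "NativeState.h"
#include <cstddef>

void emitDoLenCall(Luau::CodeGen::X64::IrRegAllocX64& regs, Luau::CodeGen::X64::AssemblyBuilderX64& build, int ra, int rb)
{
    using namespace Luau::CodeGen;
    using namespace Luau::CodeGen::X64;

    IrCallWrapperX64 callWrap(regs, build, kInvalidInstIdx);
    callWrap.addArgument(SizeX64::qword, rState, {});             // lua_State*
    callWrap.addArgument(SizeX64::qword, luauRegAddress(ra), {}); // StkId ra
    callWrap.addArgument(SizeX64::qword, luauRegAddress(rb), {}); // TValue* rb
    callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_dolen)]);
}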


@ -1,874 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrDump.h"
#include "Luau/IrUtils.h"
#include "lua.h"
#include <stdarg.h>
namespace Luau
{
namespace CodeGen
{
static const char* textForCondition[] = {
"eq", "not_eq", "lt", "not_lt", "le", "not_le", "gt", "not_gt", "ge", "not_ge", "u_lt", "u_le", "u_gt", "u_ge"};
static_assert(sizeof(textForCondition) / sizeof(textForCondition[0]) == size_t(IrCondition::Count), "all conditions have to be covered");
const int kDetailsAlignColumn = 60;
LUAU_PRINTF_ATTR(2, 3)
static void append(std::string& result, const char* fmt, ...)
{
char buf[256];
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
result.append(buf);
}
static void padToDetailColumn(std::string& result, size_t lineStart)
{
int pad = kDetailsAlignColumn - int(result.size() - lineStart);
if (pad > 0)
result.append(pad, ' ');
}
static const char* getTagName(uint8_t tag)
{
switch (tag)
{
case LUA_TNIL:
return "tnil";
case LUA_TBOOLEAN:
return "tboolean";
case LUA_TLIGHTUSERDATA:
return "tlightuserdata";
case LUA_TNUMBER:
return "tnumber";
case LUA_TVECTOR:
return "tvector";
case LUA_TSTRING:
return "tstring";
case LUA_TTABLE:
return "ttable";
case LUA_TFUNCTION:
return "tfunction";
case LUA_TUSERDATA:
return "tuserdata";
case LUA_TTHREAD:
return "tthread";
default:
LUAU_ASSERT(!"Unknown type tag");
LUAU_UNREACHABLE();
}
}
const char* getCmdName(IrCmd cmd)
{
switch (cmd)
{
case IrCmd::NOP:
return "NOP";
case IrCmd::LOAD_TAG:
return "LOAD_TAG";
case IrCmd::LOAD_POINTER:
return "LOAD_POINTER";
case IrCmd::LOAD_DOUBLE:
return "LOAD_DOUBLE";
case IrCmd::LOAD_INT:
return "LOAD_INT";
case IrCmd::LOAD_TVALUE:
return "LOAD_TVALUE";
case IrCmd::LOAD_NODE_VALUE_TV:
return "LOAD_NODE_VALUE_TV";
case IrCmd::LOAD_ENV:
return "LOAD_ENV";
case IrCmd::GET_ARR_ADDR:
return "GET_ARR_ADDR";
case IrCmd::GET_SLOT_NODE_ADDR:
return "GET_SLOT_NODE_ADDR";
case IrCmd::GET_HASH_NODE_ADDR:
return "GET_HASH_NODE_ADDR";
case IrCmd::STORE_TAG:
return "STORE_TAG";
case IrCmd::STORE_POINTER:
return "STORE_POINTER";
case IrCmd::STORE_DOUBLE:
return "STORE_DOUBLE";
case IrCmd::STORE_INT:
return "STORE_INT";
case IrCmd::STORE_VECTOR:
return "STORE_VECTOR";
case IrCmd::STORE_TVALUE:
return "STORE_TVALUE";
case IrCmd::STORE_NODE_VALUE_TV:
return "STORE_NODE_VALUE_TV";
case IrCmd::ADD_INT:
return "ADD_INT";
case IrCmd::SUB_INT:
return "SUB_INT";
case IrCmd::ADD_NUM:
return "ADD_NUM";
case IrCmd::SUB_NUM:
return "SUB_NUM";
case IrCmd::MUL_NUM:
return "MUL_NUM";
case IrCmd::DIV_NUM:
return "DIV_NUM";
case IrCmd::MOD_NUM:
return "MOD_NUM";
case IrCmd::MIN_NUM:
return "MIN_NUM";
case IrCmd::MAX_NUM:
return "MAX_NUM";
case IrCmd::UNM_NUM:
return "UNM_NUM";
case IrCmd::FLOOR_NUM:
return "FLOOR_NUM";
case IrCmd::CEIL_NUM:
return "CEIL_NUM";
case IrCmd::ROUND_NUM:
return "ROUND_NUM";
case IrCmd::SQRT_NUM:
return "SQRT_NUM";
case IrCmd::ABS_NUM:
return "ABS_NUM";
case IrCmd::NOT_ANY:
return "NOT_ANY";
case IrCmd::JUMP:
return "JUMP";
case IrCmd::JUMP_IF_TRUTHY:
return "JUMP_IF_TRUTHY";
case IrCmd::JUMP_IF_FALSY:
return "JUMP_IF_FALSY";
case IrCmd::JUMP_EQ_TAG:
return "JUMP_EQ_TAG";
case IrCmd::JUMP_EQ_INT:
return "JUMP_EQ_INT";
case IrCmd::JUMP_LT_INT:
return "JUMP_LT_INT";
case IrCmd::JUMP_GE_UINT:
return "JUMP_GE_UINT";
case IrCmd::JUMP_EQ_POINTER:
return "JUMP_EQ_POINTER";
case IrCmd::JUMP_CMP_NUM:
return "JUMP_CMP_NUM";
case IrCmd::JUMP_CMP_ANY:
return "JUMP_CMP_ANY";
case IrCmd::JUMP_SLOT_MATCH:
return "JUMP_SLOT_MATCH";
case IrCmd::TABLE_LEN:
return "TABLE_LEN";
case IrCmd::NEW_TABLE:
return "NEW_TABLE";
case IrCmd::DUP_TABLE:
return "DUP_TABLE";
case IrCmd::TRY_NUM_TO_INDEX:
return "TRY_NUM_TO_INDEX";
case IrCmd::TRY_CALL_FASTGETTM:
return "TRY_CALL_FASTGETTM";
case IrCmd::INT_TO_NUM:
return "INT_TO_NUM";
case IrCmd::UINT_TO_NUM:
return "UINT_TO_NUM";
case IrCmd::NUM_TO_INT:
return "NUM_TO_INT";
case IrCmd::NUM_TO_UINT:
return "NUM_TO_UINT";
case IrCmd::ADJUST_STACK_TO_REG:
return "ADJUST_STACK_TO_REG";
case IrCmd::ADJUST_STACK_TO_TOP:
return "ADJUST_STACK_TO_TOP";
case IrCmd::FASTCALL:
return "FASTCALL";
case IrCmd::INVOKE_FASTCALL:
return "INVOKE_FASTCALL";
case IrCmd::CHECK_FASTCALL_RES:
return "CHECK_FASTCALL_RES";
case IrCmd::DO_ARITH:
return "DO_ARITH";
case IrCmd::DO_LEN:
return "DO_LEN";
case IrCmd::GET_TABLE:
return "GET_TABLE";
case IrCmd::SET_TABLE:
return "SET_TABLE";
case IrCmd::GET_IMPORT:
return "GET_IMPORT";
case IrCmd::CONCAT:
return "CONCAT";
case IrCmd::GET_UPVALUE:
return "GET_UPVALUE";
case IrCmd::SET_UPVALUE:
return "SET_UPVALUE";
case IrCmd::PREPARE_FORN:
return "PREPARE_FORN";
case IrCmd::CHECK_TAG:
return "CHECK_TAG";
case IrCmd::CHECK_READONLY:
return "CHECK_READONLY";
case IrCmd::CHECK_NO_METATABLE:
return "CHECK_NO_METATABLE";
case IrCmd::CHECK_SAFE_ENV:
return "CHECK_SAFE_ENV";
case IrCmd::CHECK_ARRAY_SIZE:
return "CHECK_ARRAY_SIZE";
case IrCmd::CHECK_SLOT_MATCH:
return "CHECK_SLOT_MATCH";
case IrCmd::CHECK_NODE_NO_NEXT:
return "CHECK_NODE_NO_NEXT";
case IrCmd::INTERRUPT:
return "INTERRUPT";
case IrCmd::CHECK_GC:
return "CHECK_GC";
case IrCmd::BARRIER_OBJ:
return "BARRIER_OBJ";
case IrCmd::BARRIER_TABLE_BACK:
return "BARRIER_TABLE_BACK";
case IrCmd::BARRIER_TABLE_FORWARD:
return "BARRIER_TABLE_FORWARD";
case IrCmd::SET_SAVEDPC:
return "SET_SAVEDPC";
case IrCmd::CLOSE_UPVALS:
return "CLOSE_UPVALS";
case IrCmd::CAPTURE:
return "CAPTURE";
case IrCmd::SETLIST:
return "SETLIST";
case IrCmd::CALL:
return "CALL";
case IrCmd::RETURN:
return "RETURN";
case IrCmd::FORGLOOP:
return "FORGLOOP";
case IrCmd::FORGLOOP_FALLBACK:
return "FORGLOOP_FALLBACK";
case IrCmd::FORGPREP_XNEXT_FALLBACK:
return "FORGPREP_XNEXT_FALLBACK";
case IrCmd::COVERAGE:
return "COVERAGE";
case IrCmd::FALLBACK_GETGLOBAL:
return "FALLBACK_GETGLOBAL";
case IrCmd::FALLBACK_SETGLOBAL:
return "FALLBACK_SETGLOBAL";
case IrCmd::FALLBACK_GETTABLEKS:
return "FALLBACK_GETTABLEKS";
case IrCmd::FALLBACK_SETTABLEKS:
return "FALLBACK_SETTABLEKS";
case IrCmd::FALLBACK_NAMECALL:
return "FALLBACK_NAMECALL";
case IrCmd::FALLBACK_PREPVARARGS:
return "FALLBACK_PREPVARARGS";
case IrCmd::FALLBACK_GETVARARGS:
return "FALLBACK_GETVARARGS";
case IrCmd::FALLBACK_NEWCLOSURE:
return "FALLBACK_NEWCLOSURE";
case IrCmd::FALLBACK_DUPCLOSURE:
return "FALLBACK_DUPCLOSURE";
case IrCmd::FALLBACK_FORGPREP:
return "FALLBACK_FORGPREP";
case IrCmd::SUBSTITUTE:
return "SUBSTITUTE";
case IrCmd::BITAND_UINT:
return "BITAND_UINT";
case IrCmd::BITXOR_UINT:
return "BITXOR_UINT";
case IrCmd::BITOR_UINT:
return "BITOR_UINT";
case IrCmd::BITNOT_UINT:
return "BITNOT_UINT";
case IrCmd::BITLSHIFT_UINT:
return "BITLSHIFT_UINT";
case IrCmd::BITRSHIFT_UINT:
return "BITRSHIFT_UINT";
case IrCmd::BITARSHIFT_UINT:
return "BITARSHIFT_UINT";
case IrCmd::BITLROTATE_UINT:
return "BITLROTATE_UINT";
case IrCmd::BITRROTATE_UINT:
return "BITRROTATE_UINT";
case IrCmd::BITCOUNTLZ_UINT:
return "BITCOUNTLZ_UINT";
case IrCmd::BITCOUNTRZ_UINT:
return "BITCOUNTRZ_UINT";
case IrCmd::INVOKE_LIBM:
return "INVOKE_LIBM";
}
LUAU_UNREACHABLE();
}
const char* getBlockKindName(IrBlockKind kind)
{
switch (kind)
{
case IrBlockKind::Bytecode:
return "bb_bytecode";
case IrBlockKind::Fallback:
return "bb_fallback";
case IrBlockKind::Internal:
return "bb";
case IrBlockKind::Linearized:
return "bb_linear";
case IrBlockKind::Dead:
return "dead";
}
LUAU_UNREACHABLE();
}
void toString(IrToStringContext& ctx, const IrInst& inst, uint32_t index)
{
append(ctx.result, " ");
// Instructions with a result display target virtual register
if (hasResult(inst.cmd))
append(ctx.result, "%%%u = ", index);
ctx.result.append(getCmdName(inst.cmd));
auto checkOp = [&ctx](IrOp op, const char* sep) {
if (op.kind != IrOpKind::None)
{
ctx.result.append(sep);
toString(ctx, op);
}
};
checkOp(inst.a, " ");
checkOp(inst.b, ", ");
checkOp(inst.c, ", ");
checkOp(inst.d, ", ");
checkOp(inst.e, ", ");
checkOp(inst.f, ", ");
}
void toString(IrToStringContext& ctx, const IrBlock& block, uint32_t index)
{
append(ctx.result, "%s_%u", getBlockKindName(block.kind), index);
}
void toString(IrToStringContext& ctx, IrOp op)
{
switch (op.kind)
{
case IrOpKind::None:
break;
case IrOpKind::Undef:
append(ctx.result, "undef");
break;
case IrOpKind::Constant:
toString(ctx.result, ctx.constants[op.index]);
break;
case IrOpKind::Condition:
LUAU_ASSERT(op.index < uint32_t(IrCondition::Count));
ctx.result.append(textForCondition[op.index]);
break;
case IrOpKind::Inst:
append(ctx.result, "%%%u", op.index);
break;
case IrOpKind::Block:
append(ctx.result, "%s_%u", getBlockKindName(ctx.blocks[op.index].kind), op.index);
break;
case IrOpKind::VmReg:
append(ctx.result, "R%d", vmRegOp(op));
break;
case IrOpKind::VmConst:
append(ctx.result, "K%d", vmConstOp(op));
break;
case IrOpKind::VmUpvalue:
append(ctx.result, "U%d", vmUpvalueOp(op));
break;
}
}
void toString(std::string& result, IrConst constant)
{
switch (constant.kind)
{
case IrConstKind::Int:
append(result, "%di", constant.valueInt);
break;
case IrConstKind::Uint:
append(result, "%uu", constant.valueUint);
break;
case IrConstKind::Double:
// NaN is the only double value that compares unequal to itself
if (constant.valueDouble != constant.valueDouble)
append(result, "nan");
else
append(result, "%.17g", constant.valueDouble);
break;
case IrConstKind::Tag:
result.append(getTagName(constant.valueTag));
break;
}
}
static void appendBlockSet(IrToStringContext& ctx, BlockIteratorWrapper blocks)
{
bool comma = false;
for (uint32_t target : blocks)
{
if (comma)
append(ctx.result, ", ");
comma = true;
toString(ctx, ctx.blocks[target], target);
}
}
static void appendRegisterSet(IrToStringContext& ctx, const RegisterSet& rs, const char* separator)
{
bool comma = false;
for (size_t i = 0; i < rs.regs.size(); i++)
{
if (rs.regs.test(i))
{
if (comma)
ctx.result.append(separator);
comma = true;
append(ctx.result, "R%d", int(i));
}
}
if (rs.varargSeq)
{
if (comma)
ctx.result.append(separator);
append(ctx.result, "R%d...", rs.varargStart);
}
}
static RegisterSet getJumpTargetExtraLiveIn(IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, const IrInst& inst)
{
RegisterSet extraRs;
if (blockIdx >= ctx.cfg.in.size())
return extraRs;
const RegisterSet& defRs = ctx.cfg.in[blockIdx];
// Find first block argument, for guard instructions (isNonTerminatingJump), that's the first and only one
LUAU_ASSERT(isNonTerminatingJump(inst.cmd));
IrOp op = inst.a;
if (inst.b.kind == IrOpKind::Block)
op = inst.b;
else if (inst.c.kind == IrOpKind::Block)
op = inst.c;
else if (inst.d.kind == IrOpKind::Block)
op = inst.d;
else if (inst.e.kind == IrOpKind::Block)
op = inst.e;
else if (inst.f.kind == IrOpKind::Block)
op = inst.f;
if (op.kind == IrOpKind::Block && op.index < ctx.cfg.in.size())
{
const RegisterSet& inRs = ctx.cfg.in[op.index];
extraRs.regs = inRs.regs & ~defRs.regs;
if (inRs.varargSeq)
requireVariadicSequence(extraRs, defRs, inRs.varargStart);
}
return extraRs;
}
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t blockIdx, const IrInst& inst, uint32_t instIdx, bool includeUseInfo)
{
size_t start = ctx.result.size();
toString(ctx, inst, instIdx);
if (includeUseInfo)
{
padToDetailColumn(ctx.result, start);
if (inst.useCount == 0 && hasSideEffects(inst.cmd))
{
if (isNonTerminatingJump(inst.cmd))
{
RegisterSet extraRs = getJumpTargetExtraLiveIn(ctx, block, blockIdx, inst);
if (extraRs.regs.any() || extraRs.varargSeq)
{
append(ctx.result, "; %%%u, extra in: ", instIdx);
appendRegisterSet(ctx, extraRs, ", ");
ctx.result.append("\n");
}
else
{
append(ctx.result, "; %%%u\n", instIdx);
}
}
else
{
append(ctx.result, "; %%%u\n", instIdx);
}
}
else
{
append(ctx.result, "; useCount: %d, lastUse: %%%u\n", inst.useCount, inst.lastUse);
}
}
else
{
ctx.result.append("\n");
}
}
void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t index, bool includeUseInfo)
{
// Report captured registers for entry block
if (block.useCount == 0 && block.kind != IrBlockKind::Dead && ctx.cfg.captured.regs.any())
{
append(ctx.result, "; captured regs: ");
appendRegisterSet(ctx, ctx.cfg.captured, ", ");
append(ctx.result, "\n\n");
}
size_t start = ctx.result.size();
toString(ctx, block, index);
append(ctx.result, ":");
if (includeUseInfo)
{
padToDetailColumn(ctx.result, start);
append(ctx.result, "; useCount: %d\n", block.useCount);
}
else
{
ctx.result.append("\n");
}
// Predecessor list
if (index < ctx.cfg.predecessorsOffsets.size())
{
BlockIteratorWrapper pred = predecessors(ctx.cfg, index);
if (!pred.empty())
{
append(ctx.result, "; predecessors: ");
appendBlockSet(ctx, pred);
append(ctx.result, "\n");
}
}
// Successor list
if (index < ctx.cfg.successorsOffsets.size())
{
BlockIteratorWrapper succ = successors(ctx.cfg, index);
if (!succ.empty())
{
append(ctx.result, "; successors: ");
appendBlockSet(ctx, succ);
append(ctx.result, "\n");
}
}
// Live-in VM regs
if (index < ctx.cfg.in.size())
{
const RegisterSet& in = ctx.cfg.in[index];
if (in.regs.any() || in.varargSeq)
{
append(ctx.result, "; in regs: ");
appendRegisterSet(ctx, in, ", ");
append(ctx.result, "\n");
}
}
// Live-out VM regs
if (index < ctx.cfg.out.size())
{
const RegisterSet& out = ctx.cfg.out[index];
if (out.regs.any() || out.varargSeq)
{
append(ctx.result, "; out regs: ");
appendRegisterSet(ctx, out, ", ");
append(ctx.result, "\n");
}
}
}
std::string toString(const IrFunction& function, bool includeUseInfo)
{
std::string result;
IrToStringContext ctx{result, function.blocks, function.constants, function.cfg};
for (size_t i = 0; i < function.blocks.size(); i++)
{
const IrBlock& block = function.blocks[i];
if (block.kind == IrBlockKind::Dead)
continue;
toStringDetailed(ctx, block, uint32_t(i), includeUseInfo);
if (block.start == ~0u)
{
append(ctx.result, " *empty*\n\n");
continue;
}
// To allow dumping blocks that are still being constructed, we can't rely on terminator and need a bounds check
for (uint32_t index = block.start; index <= block.finish && index < uint32_t(function.instructions.size()); index++)
{
const IrInst& inst = function.instructions[index];
// Skip pseudo instructions unless they are still referenced
if (isPseudo(inst.cmd) && inst.useCount == 0)
continue;
append(ctx.result, " ");
toStringDetailed(ctx, block, uint32_t(i), inst, index, includeUseInfo);
}
append(ctx.result, "\n");
}
return result;
}
std::string dump(const IrFunction& function)
{
std::string result = toString(function, /* includeUseInfo */ true);
printf("%s\n", result.c_str());
return result;
}
static void appendLabelRegset(IrToStringContext& ctx, const std::vector<RegisterSet>& regSets, size_t blockIdx, const char* name)
{
if (blockIdx < regSets.size())
{
const RegisterSet& rs = regSets[blockIdx];
if (rs.regs.any() || rs.varargSeq)
{
append(ctx.result, "|{%s|", name);
appendRegisterSet(ctx, rs, "|");
append(ctx.result, "}");
}
}
}
static void appendBlocks(IrToStringContext& ctx, const IrFunction& function, bool includeInst, bool includeIn, bool includeOut, bool includeDef)
{
for (size_t i = 0; i < function.blocks.size(); i++)
{
const IrBlock& block = function.blocks[i];
append(ctx.result, "b%u [", unsigned(i));
if (block.kind == IrBlockKind::Fallback)
append(ctx.result, "style=filled;fillcolor=salmon;");
else if (block.kind == IrBlockKind::Bytecode)
append(ctx.result, "style=filled;fillcolor=palegreen;");
append(ctx.result, "label=\"{");
toString(ctx, block, uint32_t(i));
if (includeIn)
appendLabelRegset(ctx, ctx.cfg.in, i, "in");
if (includeInst && block.start != ~0u)
{
for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
{
const IrInst& inst = function.instructions[instIdx];
// Skip pseudo instructions unless they are still referenced
if (isPseudo(inst.cmd) && inst.useCount == 0)
continue;
append(ctx.result, "|");
toString(ctx, inst, instIdx);
}
}
if (includeDef)
appendLabelRegset(ctx, ctx.cfg.def, i, "def");
if (includeOut)
appendLabelRegset(ctx, ctx.cfg.out, i, "out");
append(ctx.result, "}\"];\n");
}
}
std::string toDot(const IrFunction& function, bool includeInst)
{
std::string result;
IrToStringContext ctx{result, function.blocks, function.constants, function.cfg};
append(ctx.result, "digraph CFG {\n");
append(ctx.result, "node[shape=record]\n");
appendBlocks(ctx, function, includeInst, /* includeIn */ true, /* includeOut */ true, /* includeDef */ true);
for (size_t i = 0; i < function.blocks.size(); i++)
{
const IrBlock& block = function.blocks[i];
if (block.start == ~0u)
continue;
for (uint32_t instIdx = block.start; instIdx != ~0u && instIdx <= block.finish; instIdx++)
{
const IrInst& inst = function.instructions[instIdx];
auto checkOp = [&](IrOp op) {
if (op.kind == IrOpKind::Block)
{
if (function.blocks[op.index].kind != IrBlockKind::Fallback)
append(ctx.result, "b%u -> b%u [weight=10];\n", unsigned(i), op.index);
else
append(ctx.result, "b%u -> b%u;\n", unsigned(i), op.index);
}
};
checkOp(inst.a);
checkOp(inst.b);
checkOp(inst.c);
checkOp(inst.d);
checkOp(inst.e);
checkOp(inst.f);
}
}
append(ctx.result, "}\n");
return result;
}
std::string toDotCfg(const IrFunction& function)
{
std::string result;
IrToStringContext ctx{result, function.blocks, function.constants, function.cfg};
append(ctx.result, "digraph CFG {\n");
append(ctx.result, "node[shape=record]\n");
appendBlocks(ctx, function, /* includeInst */ false, /* includeIn */ false, /* includeOut */ false, /* includeDef */ true);
for (size_t i = 0; i < function.blocks.size() && i < ctx.cfg.successorsOffsets.size(); i++)
{
BlockIteratorWrapper succ = successors(ctx.cfg, unsigned(i));
for (uint32_t target : succ)
append(ctx.result, "b%u -> b%u;\n", unsigned(i), target);
}
append(ctx.result, "}\n");
return result;
}
std::string toDotDjGraph(const IrFunction& function)
{
std::string result;
IrToStringContext ctx{result, function.blocks, function.constants, function.cfg};
append(ctx.result, "digraph CFG {\n");
for (size_t i = 0; i < ctx.blocks.size(); i++)
{
const IrBlock& block = ctx.blocks[i];
append(ctx.result, "b%u [", unsigned(i));
if (block.kind == IrBlockKind::Fallback)
append(ctx.result, "style=filled;fillcolor=salmon;");
else if (block.kind == IrBlockKind::Bytecode)
append(ctx.result, "style=filled;fillcolor=palegreen;");
append(ctx.result, "label=\"");
toString(ctx, block, uint32_t(i));
append(ctx.result, "\"];\n");
}
// Layer by depth in tree
uint32_t depth = 0;
bool found = true;
while (found)
{
found = false;
append(ctx.result, "{rank = same;");
for (size_t i = 0; i < ctx.cfg.domOrdering.size(); i++)
{
if (ctx.cfg.domOrdering[i].depth == depth)
{
append(ctx.result, "b%u;", unsigned(i));
found = true;
}
}
append(ctx.result, "}\n");
depth++;
}
for (size_t i = 0; i < ctx.cfg.domChildrenOffsets.size(); i++)
{
BlockIteratorWrapper dom = domChildren(ctx.cfg, unsigned(i));
for (uint32_t target : dom)
append(ctx.result, "b%u -> b%u;\n", unsigned(i), target);
// Join edges are all successor edges that do not strongly dominate
BlockIteratorWrapper succ = successors(ctx.cfg, unsigned(i));
for (uint32_t successor : succ)
{
bool dominated = false;
for (uint32_t target : dom)
{
if (target == successor)
{
dominated = true;
break;
}
}
if (!dominated)
append(ctx.result, "b%u -> b%u [style=dotted];\n", unsigned(i), successor);
}
}
append(ctx.result, "}\n");
return result;
}
std::string dumpDot(const IrFunction& function, bool includeInst)
{
std::string result = toDot(function, includeInst);
printf("%s\n", result.c_str());
return result;
}
} // namespace CodeGen
} // namespace Luau
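// Example of driving the dump helpers above while debugging codegen;
// assumes an IrFunction that has been built (and, for liveness
// annotations, analyzed).
#include "Luau/IrDump.h"
#include <fstream>

void debugDumpIr(const Luau::CodeGen::IrFunction& function)
{
    using namespace Luau::CodeGen;

    // Text listing with use counts, liveness and CFG annotations
    std::string ir = toString(function, /* includeUseInfo */ true);

    // Graphviz CFG; render with `dot -Tsvg ir.dot -o ir.svg`
    std::ofstream out("ir.dot");
    out << toDot(function, /* includeInst */ true);
}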

File diff suppressed because it is too large


@ -1,81 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/IrData.h"
#include "IrRegAllocA64.h"
#include "IrValueLocationTracking.h"
#include <vector>
namespace Luau
{
namespace CodeGen
{
struct ModuleHelpers;
struct NativeState;
struct AssemblyOptions;
namespace A64
{
struct IrLoweringA64
{
IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
void finishBlock();
void finishFunction();
bool hasError() const;
bool isFallthroughBlock(IrBlock target, IrBlock next);
void jumpOrFallthrough(IrBlock& target, IrBlock& next);
// Operand data build helpers
// May emit data/address synthesis instructions
RegisterA64 tempDouble(IrOp op);
RegisterA64 tempInt(IrOp op);
RegisterA64 tempUint(IrOp op);
AddressA64 tempAddr(IrOp op, int offset);
// May emit restore instructions
RegisterA64 regOp(IrOp op);
// Operand data lookup helpers
IrConst constOp(IrOp op) const;
uint8_t tagOp(IrOp op) const;
int intOp(IrOp op) const;
unsigned uintOp(IrOp op) const;
double doubleOp(IrOp op) const;
IrBlock& blockOp(IrOp op) const;
Label& labelOp(IrOp op) const;
struct InterruptHandler
{
Label self;
unsigned int pcpos;
Label next;
};
AssemblyBuilderA64& build;
ModuleHelpers& helpers;
NativeState& data;
IrFunction& function;
IrRegAllocA64 regs;
IrValueLocationTracking valueTracker;
std::vector<InterruptHandler> interruptHandlers;
bool error = false;
};
} // namespace A64
} // namespace CodeGen
} // namespace Luau
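// This lowering interface (mirrored by the X64 header below) is consumed by
// a driver loop of roughly the following shape; a sketch only, since the
// real driver also sorts blocks, skips dead ones, and checks hasError().
#include "Luau/IrData.h"
#include "Luau/IrUtils.h"

template<typename Lowering>
void lowerBlock(Lowering& lowering, Luau::CodeGen::IrFunction& function, Luau::CodeGen::IrBlock& block, Luau::CodeGen::IrBlock& next)
{
    using namespace Luau::CodeGen;

    for (uint32_t index = block.start; index <= block.finish; ++index)
    {
        IrInst& inst = function.instructions[index];

        // Pseudo instructions don't generate code on their own
        if (isPseudo(inst.cmd) && inst.useCount == 0)
            continue;

        lowering.lowerInst(inst, index, next);
    }

    lowering.finishBlock();
}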

File diff suppressed because it is too large


@ -1,79 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/IrData.h"
#include "Luau/IrRegAllocX64.h"
#include "IrValueLocationTracking.h"
#include <vector>
struct Proto;
namespace Luau
{
namespace CodeGen
{
struct ModuleHelpers;
struct NativeState;
struct AssemblyOptions;
namespace X64
{
struct IrLoweringX64
{
IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
void finishBlock();
void finishFunction();
bool hasError() const;
bool isFallthroughBlock(IrBlock target, IrBlock next);
void jumpOrFallthrough(IrBlock& target, IrBlock& next);
void jumpOrAbortOnUndef(ConditionX64 cond, ConditionX64 condInverse, IrOp targetOrUndef);
void storeDoubleAsFloat(OperandX64 dst, IrOp src);
// Operand data lookup helpers
OperandX64 memRegDoubleOp(IrOp op);
OperandX64 memRegUintOp(IrOp op);
OperandX64 memRegTagOp(IrOp op);
RegisterX64 regOp(IrOp op);
IrConst constOp(IrOp op) const;
uint8_t tagOp(IrOp op) const;
int intOp(IrOp op) const;
unsigned uintOp(IrOp op) const;
double doubleOp(IrOp op) const;
IrBlock& blockOp(IrOp op) const;
Label& labelOp(IrOp op) const;
struct InterruptHandler
{
Label self;
unsigned int pcpos;
Label next;
};
AssemblyBuilderX64& build;
ModuleHelpers& helpers;
NativeState& data;
IrFunction& function;
IrRegAllocX64 regs;
IrValueLocationTracking valueTracker;
std::vector<InterruptHandler> interruptHandlers;
};
} // namespace X64
} // namespace CodeGen
} // namespace Luau


@ -1,435 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrRegAllocA64.h"
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/IrUtils.h"
#include "BitUtils.h"
#include "EmitCommonA64.h"
#include <string.h>
LUAU_FASTFLAGVARIABLE(DebugLuauCodegenChaosA64, false)
namespace Luau
{
namespace CodeGen
{
namespace A64
{
static int allocSpill(uint32_t& free, KindA64 kind)
{
LUAU_ASSERT(kStackSize <= 256); // to support larger stack frames, we need to ensure qN is allocated at 16b boundary to fit in ldr/str encoding
// qN registers use two consecutive slots
int slot = countrz(kind == KindA64::q ? free & (free >> 1) : free);
if (slot == 32)
return -1;
uint32_t mask = (kind == KindA64::q ? 3u : 1u) << slot;
LUAU_ASSERT((free & mask) == mask);
free &= ~mask;
return slot;
}
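// Worked example of the pairing trick above: with free = 0b1101 (slots 0, 2
// and 3 open), free & (free >> 1) = 0b0100, so countrz selects slot 2 for a
// qN register and the mask 3u << 2 = 0b1100 reserves the 2-3 pair, while a
// non-q spill would have taken slot 0 directly.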
static void freeSpill(uint32_t& free, KindA64 kind, uint8_t slot)
{
// qN registers use two consecutive slots
uint32_t mask = (kind == KindA64::q ? 3u : 1u) << slot;
LUAU_ASSERT((free & mask) == 0);
free |= mask;
}
static int getReloadOffset(IrCmd cmd)
{
switch (getCmdValueKind(cmd))
{
case IrValueKind::Unknown:
case IrValueKind::None:
LUAU_ASSERT(!"Invalid operand restore value kind");
break;
case IrValueKind::Tag:
return offsetof(TValue, tt);
case IrValueKind::Int:
return offsetof(TValue, value);
case IrValueKind::Pointer:
return offsetof(TValue, value.gc);
case IrValueKind::Double:
return offsetof(TValue, value.n);
case IrValueKind::Tvalue:
return 0;
}
LUAU_ASSERT(!"Invalid operand restore value kind");
LUAU_UNREACHABLE();
}
static AddressA64 getReloadAddress(const IrFunction& function, const IrInst& inst)
{
IrOp location = function.findRestoreOp(inst);
if (location.kind == IrOpKind::VmReg)
return mem(rBase, vmRegOp(location) * sizeof(TValue) + getReloadOffset(inst.cmd));
// loads are 4/8/16 bytes; we conservatively limit the offset so it fits the scaled-offset encoding even at the smallest (4-byte) scale
if (location.kind == IrOpKind::VmConst && vmConstOp(location) * sizeof(TValue) <= AddressA64::kMaxOffset * 4)
return mem(rConstants, vmConstOp(location) * sizeof(TValue) + getReloadOffset(inst.cmd));
return AddressA64(xzr); // dummy
}
static void restoreInst(AssemblyBuilderA64& build, uint32_t& freeSpillSlots, IrFunction& function, const IrRegAllocA64::Spill& s, RegisterA64 reg)
{
IrInst& inst = function.instructions[s.inst];
LUAU_ASSERT(inst.regA64 == noreg);
if (s.slot >= 0)
{
build.ldr(reg, mem(sp, sSpillArea.data + s.slot * 8));
freeSpill(freeSpillSlots, reg.kind, s.slot);
}
else
{
LUAU_ASSERT(!inst.spilled && inst.needsReload);
AddressA64 addr = getReloadAddress(function, function.instructions[s.inst]);
LUAU_ASSERT(addr.base != xzr);
build.ldr(reg, addr);
}
inst.spilled = false;
inst.needsReload = false;
inst.regA64 = reg;
}
IrRegAllocA64::IrRegAllocA64(IrFunction& function, std::initializer_list<std::pair<RegisterA64, RegisterA64>> regs)
: function(function)
{
for (auto& p : regs)
{
LUAU_ASSERT(p.first.kind == p.second.kind && p.first.index <= p.second.index);
Set& set = getSet(p.first.kind);
for (int i = p.first.index; i <= p.second.index; ++i)
set.base |= 1u << i;
}
gpr.free = gpr.base;
simd.free = simd.base;
memset(gpr.defs, -1, sizeof(gpr.defs));
memset(simd.defs, -1, sizeof(simd.defs));
LUAU_ASSERT(kSpillSlots <= 32);
freeSpillSlots = (kSpillSlots == 32) ? ~0u : (1u << kSpillSlots) - 1;
}
RegisterA64 IrRegAllocA64::allocReg(KindA64 kind, uint32_t index)
{
Set& set = getSet(kind);
if (set.free == 0)
{
// TODO: remember the error and fail lowering
LUAU_ASSERT(!"Out of registers to allocate");
return noreg;
}
int reg = 31 - countlz(set.free);
if (FFlag::DebugLuauCodegenChaosA64)
reg = countrz(set.free); // allocate from low end; this causes extra conflicts for calls
set.free &= ~(1u << reg);
set.defs[reg] = index;
return RegisterA64{kind, uint8_t(reg)};
}
RegisterA64 IrRegAllocA64::allocTemp(KindA64 kind)
{
Set& set = getSet(kind);
if (set.free == 0)
{
// TODO: remember the error and fail lowering
LUAU_ASSERT(!"Out of registers to allocate");
return noreg;
}
int reg = 31 - countlz(set.free);
if (FFlag::DebugLuauCodegenChaosA64)
reg = countrz(set.free); // allocate from low end; this causes extra conflicts for calls
set.free &= ~(1u << reg);
set.temp |= 1u << reg;
LUAU_ASSERT(set.defs[reg] == kInvalidInstIdx);
return RegisterA64{kind, uint8_t(reg)};
}
RegisterA64 IrRegAllocA64::allocReuse(KindA64 kind, uint32_t index, std::initializer_list<IrOp> oprefs)
{
for (IrOp op : oprefs)
{
if (op.kind != IrOpKind::Inst)
continue;
IrInst& source = function.instructions[op.index];
if (source.lastUse == index && !source.reusedReg && source.regA64 != noreg)
{
LUAU_ASSERT(!source.spilled && !source.needsReload);
LUAU_ASSERT(source.regA64.kind == kind);
Set& set = getSet(kind);
LUAU_ASSERT(set.defs[source.regA64.index] == op.index);
set.defs[source.regA64.index] = index;
source.reusedReg = true;
return source.regA64;
}
}
return allocReg(kind, index);
}
RegisterA64 IrRegAllocA64::takeReg(RegisterA64 reg, uint32_t index)
{
Set& set = getSet(reg.kind);
LUAU_ASSERT(set.free & (1u << reg.index));
LUAU_ASSERT(set.defs[reg.index] == kInvalidInstIdx);
set.free &= ~(1u << reg.index);
set.defs[reg.index] = index;
return reg;
}
void IrRegAllocA64::freeReg(RegisterA64 reg)
{
Set& set = getSet(reg.kind);
LUAU_ASSERT((set.base & (1u << reg.index)) != 0);
LUAU_ASSERT((set.free & (1u << reg.index)) == 0);
LUAU_ASSERT((set.temp & (1u << reg.index)) == 0);
set.free |= 1u << reg.index;
set.defs[reg.index] = kInvalidInstIdx;
}
void IrRegAllocA64::freeLastUseReg(IrInst& target, uint32_t index)
{
if (target.lastUse == index && !target.reusedReg)
{
LUAU_ASSERT(!target.spilled && !target.needsReload);
// Register might have already been freed if it had multiple uses inside a single instruction
if (target.regA64 == noreg)
return;
freeReg(target.regA64);
target.regA64 = noreg;
}
}
void IrRegAllocA64::freeLastUseRegs(const IrInst& inst, uint32_t index)
{
auto checkOp = [this, index](IrOp op) {
if (op.kind == IrOpKind::Inst)
freeLastUseReg(function.instructions[op.index], index);
};
checkOp(inst.a);
checkOp(inst.b);
checkOp(inst.c);
checkOp(inst.d);
checkOp(inst.e);
checkOp(inst.f);
}
void IrRegAllocA64::freeTempRegs()
{
LUAU_ASSERT((gpr.free & gpr.temp) == 0);
gpr.free |= gpr.temp;
gpr.temp = 0;
LUAU_ASSERT((simd.free & simd.temp) == 0);
simd.free |= simd.temp;
simd.temp = 0;
}
size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::initializer_list<RegisterA64> live)
{
static const KindA64 sets[] = {KindA64::x, KindA64::q};
size_t start = spills.size();
uint32_t poisongpr = 0;
uint32_t poisonsimd = 0;
if (FFlag::DebugLuauCodegenChaosA64)
{
poisongpr = gpr.base & ~gpr.free;
poisonsimd = simd.base & ~simd.free;
for (RegisterA64 reg : live)
{
Set& set = getSet(reg.kind);
(&set == &simd ? poisonsimd : poisongpr) &= ~(1u << reg.index);
}
}
for (KindA64 kind : sets)
{
Set& set = getSet(kind);
// early-out
if (set.free == set.base)
continue;
// free all temp registers
LUAU_ASSERT((set.free & set.temp) == 0);
set.free |= set.temp;
set.temp = 0;
// spill all allocated registers unless they aren't used anymore
uint32_t regs = set.base & ~set.free;
while (regs)
{
int reg = 31 - countlz(regs);
uint32_t inst = set.defs[reg];
LUAU_ASSERT(inst != kInvalidInstIdx);
IrInst& def = function.instructions[inst];
LUAU_ASSERT(def.regA64.index == reg);
LUAU_ASSERT(!def.reusedReg);
LUAU_ASSERT(!def.spilled);
LUAU_ASSERT(!def.needsReload);
if (def.lastUse == index)
{
// instead of spilling the register to never reload it, we assume the register is not needed anymore
}
else if (getReloadAddress(function, def).base != xzr)
{
// instead of spilling the register to stack, we can reload it from VM stack/constants
// we still need to record the spill for restore(start) to work
Spill s = {inst, def.regA64, -1};
spills.push_back(s);
def.needsReload = true;
}
else
{
int slot = allocSpill(freeSpillSlots, def.regA64.kind);
LUAU_ASSERT(slot >= 0); // TODO: remember the error and fail lowering
build.str(def.regA64, mem(sp, sSpillArea.data + slot * 8));
Spill s = {inst, def.regA64, int8_t(slot)};
spills.push_back(s);
def.spilled = true;
}
def.regA64 = noreg;
regs &= ~(1u << reg);
set.free |= 1u << reg;
set.defs[reg] = kInvalidInstIdx;
}
LUAU_ASSERT(set.free == set.base);
}
if (FFlag::DebugLuauCodegenChaosA64)
{
for (int reg = 0; reg < 32; ++reg)
{
if (poisongpr & (1u << reg))
build.mov(RegisterA64{KindA64::x, uint8_t(reg)}, 0xdead);
if (poisonsimd & (1u << reg))
build.fmov(RegisterA64{KindA64::d, uint8_t(reg)}, -0.125);
}
}
return start;
}
void IrRegAllocA64::restore(AssemblyBuilderA64& build, size_t start)
{
LUAU_ASSERT(start <= spills.size());
if (start < spills.size())
{
for (size_t i = start; i < spills.size(); ++i)
{
Spill s = spills[i]; // copy in case takeReg reallocates spills
RegisterA64 reg = takeReg(s.origin, s.inst);
restoreInst(build, freeSpillSlots, function, s, reg);
}
spills.resize(start);
}
}
void IrRegAllocA64::restoreReg(AssemblyBuilderA64& build, IrInst& inst)
{
uint32_t index = function.getInstIndex(inst);
for (size_t i = 0; i < spills.size(); ++i)
{
if (spills[i].inst == index)
{
Spill s = spills[i]; // copy in case allocReg reallocates spills
RegisterA64 reg = allocReg(s.origin.kind, index);
restoreInst(build, freeSpillSlots, function, s, reg);
spills[i] = spills.back();
spills.pop_back();
return;
}
}
LUAU_ASSERT(!"Expected to find a spill record");
}
void IrRegAllocA64::assertNoSpills() const
{
LUAU_ASSERT(spills.empty());
}
IrRegAllocA64::Set& IrRegAllocA64::getSet(KindA64 kind)
{
switch (kind)
{
case KindA64::x:
case KindA64::w:
return gpr;
case KindA64::s:
case KindA64::d:
case KindA64::q:
return simd;
default:
LUAU_ASSERT(!"Unexpected register kind");
LUAU_UNREACHABLE();
}
}
} // namespace A64
} // namespace CodeGen
} // namespace Luau
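// A sketch of how spill/restore bracket code that clobbers registers, such
// as a helper call; the helper address register and live set are
// illustrative.
#include "IrRegAllocA64.h"
#include "Luau/AssemblyBuilderA64.h"

void emitHelperCall(Luau::CodeGen::A64::IrRegAllocA64& regs, Luau::CodeGen::A64::AssemblyBuilderA64& build, uint32_t index)
{
    using namespace Luau::CodeGen::A64;

    // x1 is assumed to already hold the helper address; keeping it in the
    // live set protects it from chaos-mode poisoning
    size_t spillStart = regs.spill(build, index, {x0, x1});
    build.blr(x1);                   // may clobber all caller-saved registers
    regs.restore(build, spillStart); // reload everything spilled above
}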


@ -1,84 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/IrData.h"
#include "Luau/RegisterA64.h"
#include <initializer_list>
#include <utility>
#include <vector>
namespace Luau
{
namespace CodeGen
{
namespace A64
{
class AssemblyBuilderA64;
struct IrRegAllocA64
{
IrRegAllocA64(IrFunction& function, std::initializer_list<std::pair<RegisterA64, RegisterA64>> regs);
RegisterA64 allocReg(KindA64 kind, uint32_t index);
RegisterA64 allocTemp(KindA64 kind);
RegisterA64 allocReuse(KindA64 kind, uint32_t index, std::initializer_list<IrOp> oprefs);
RegisterA64 takeReg(RegisterA64 reg, uint32_t index);
void freeReg(RegisterA64 reg);
void freeLastUseReg(IrInst& target, uint32_t index);
void freeLastUseRegs(const IrInst& inst, uint32_t index);
void freeTempRegs();
// Spills all live registers that outlive current instruction; all allocated registers are assumed to be undefined
size_t spill(AssemblyBuilderA64& build, uint32_t index, std::initializer_list<RegisterA64> live = {});
// Restores registers starting from the offset returned by spill(); all spills will be restored to the original registers
void restore(AssemblyBuilderA64& build, size_t start);
// Restores register for a single instruction; may not assign the previously used register!
void restoreReg(AssemblyBuilderA64& build, IrInst& inst);
void assertNoSpills() const;
struct Set
{
// which registers are in the set that the allocator manages (initialized at construction)
uint32_t base = 0;
// which subset of initial set is free
uint32_t free = 0;
// which subset of initial set is allocated as temporary
uint32_t temp = 0;
// which instruction is defining which register (for spilling); only valid if not free and not temp
uint32_t defs[32];
};
struct Spill
{
uint32_t inst;
RegisterA64 origin;
int8_t slot;
};
Set& getSet(KindA64 kind);
IrFunction& function;
Set gpr, simd;
std::vector<Spill> spills;
// which 8-byte slots are free
uint32_t freeSpillSlots = 0;
};
} // namespace A64
} // namespace CodeGen
} // namespace Luau


@ -1,492 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrRegAllocX64.h"
#include "Luau/IrUtils.h"
#include "EmitCommonX64.h"
namespace Luau
{
namespace CodeGen
{
namespace X64
{
static const RegisterX64 kGprAllocOrder[] = {rax, rdx, rcx, rbx, rsi, rdi, r8, r9, r10, r11};
IrRegAllocX64::IrRegAllocX64(AssemblyBuilderX64& build, IrFunction& function)
: build(build)
, function(function)
{
freeGprMap.fill(true);
gprInstUsers.fill(kInvalidInstIdx);
freeXmmMap.fill(true);
xmmInstUsers.fill(kInvalidInstIdx);
}
RegisterX64 IrRegAllocX64::allocReg(SizeX64 size, uint32_t instIdx)
{
if (size == SizeX64::xmmword)
{
for (size_t i = 0; i < freeXmmMap.size(); ++i)
{
if (freeXmmMap[i])
{
freeXmmMap[i] = false;
xmmInstUsers[i] = instIdx;
return RegisterX64{size, uint8_t(i)};
}
}
}
else
{
for (RegisterX64 reg : kGprAllocOrder)
{
if (freeGprMap[reg.index])
{
freeGprMap[reg.index] = false;
gprInstUsers[reg.index] = instIdx;
return RegisterX64{size, reg.index};
}
}
}
// Out of registers, spill the value with the furthest next use
const std::array<uint32_t, 16>& regInstUsers = size == SizeX64::xmmword ? xmmInstUsers : gprInstUsers;
if (uint32_t furthestUseTarget = findInstructionWithFurthestNextUse(regInstUsers); furthestUseTarget != kInvalidInstIdx)
return takeReg(function.instructions[furthestUseTarget].regX64, instIdx);
LUAU_ASSERT(!"Out of registers to allocate");
return noreg;
}
RegisterX64 IrRegAllocX64::allocRegOrReuse(SizeX64 size, uint32_t instIdx, std::initializer_list<IrOp> oprefs)
{
for (IrOp op : oprefs)
{
if (op.kind != IrOpKind::Inst)
continue;
IrInst& source = function.instructions[op.index];
if (source.lastUse == instIdx && !source.reusedReg && !source.spilled && !source.needsReload)
{
// Not comparing size directly because we only need matching register set
if ((size == SizeX64::xmmword) != (source.regX64.size == SizeX64::xmmword))
continue;
LUAU_ASSERT(source.regX64 != noreg);
source.reusedReg = true;
if (size == SizeX64::xmmword)
xmmInstUsers[source.regX64.index] = instIdx;
else
gprInstUsers[source.regX64.index] = instIdx;
return RegisterX64{size, source.regX64.index};
}
}
return allocReg(size, instIdx);
}
RegisterX64 IrRegAllocX64::takeReg(RegisterX64 reg, uint32_t instIdx)
{
if (reg.size == SizeX64::xmmword)
{
if (!freeXmmMap[reg.index])
{
LUAU_ASSERT(xmmInstUsers[reg.index] != kInvalidInstIdx);
preserve(function.instructions[xmmInstUsers[reg.index]]);
}
LUAU_ASSERT(freeXmmMap[reg.index]);
freeXmmMap[reg.index] = false;
xmmInstUsers[reg.index] = instIdx;
}
else
{
if (!freeGprMap[reg.index])
{
LUAU_ASSERT(gprInstUsers[reg.index] != kInvalidInstIdx);
preserve(function.instructions[gprInstUsers[reg.index]]);
}
LUAU_ASSERT(freeGprMap[reg.index]);
freeGprMap[reg.index] = false;
gprInstUsers[reg.index] = instIdx;
}
return reg;
}
void IrRegAllocX64::freeReg(RegisterX64 reg)
{
if (reg.size == SizeX64::xmmword)
{
LUAU_ASSERT(!freeXmmMap[reg.index]);
freeXmmMap[reg.index] = true;
xmmInstUsers[reg.index] = kInvalidInstIdx;
}
else
{
LUAU_ASSERT(!freeGprMap[reg.index]);
freeGprMap[reg.index] = true;
gprInstUsers[reg.index] = kInvalidInstIdx;
}
}
void IrRegAllocX64::freeLastUseReg(IrInst& target, uint32_t instIdx)
{
if (isLastUseReg(target, instIdx))
{
LUAU_ASSERT(!target.spilled && !target.needsReload);
// Register might have already been freed if it had multiple uses inside a single instruction
if (target.regX64 == noreg)
return;
freeReg(target.regX64);
target.regX64 = noreg;
}
}
void IrRegAllocX64::freeLastUseRegs(const IrInst& inst, uint32_t instIdx)
{
auto checkOp = [this, instIdx](IrOp op) {
if (op.kind == IrOpKind::Inst)
freeLastUseReg(function.instructions[op.index], instIdx);
};
checkOp(inst.a);
checkOp(inst.b);
checkOp(inst.c);
checkOp(inst.d);
checkOp(inst.e);
checkOp(inst.f);
}
bool IrRegAllocX64::isLastUseReg(const IrInst& target, uint32_t instIdx) const
{
return target.lastUse == instIdx && !target.reusedReg;
}
void IrRegAllocX64::preserve(IrInst& inst)
{
IrSpillX64 spill;
spill.instIdx = function.getInstIndex(inst);
spill.valueKind = getCmdValueKind(inst.cmd);
spill.spillId = nextSpillId++;
spill.originalLoc = inst.regX64;
// Loads from VmReg/VmConst don't have to be spilled, they can be restored from a register later
if (!hasRestoreOp(inst))
{
unsigned i = findSpillStackSlot(spill.valueKind);
if (spill.valueKind == IrValueKind::Tvalue)
build.vmovups(xmmword[sSpillArea + i * 8], inst.regX64);
else if (spill.valueKind == IrValueKind::Double)
build.vmovsd(qword[sSpillArea + i * 8], inst.regX64);
else if (spill.valueKind == IrValueKind::Pointer)
build.mov(qword[sSpillArea + i * 8], inst.regX64);
else if (spill.valueKind == IrValueKind::Tag || spill.valueKind == IrValueKind::Int)
build.mov(dword[sSpillArea + i * 8], inst.regX64);
else
LUAU_ASSERT(!"Unsupported value kind");
usedSpillSlots.set(i);
if (i + 1 > maxUsedSlot)
maxUsedSlot = i + 1;
if (spill.valueKind == IrValueKind::Tvalue)
{
usedSpillSlots.set(i + 1);
if (i + 2 > maxUsedSlot)
maxUsedSlot = i + 2;
}
spill.stackSlot = uint8_t(i);
inst.spilled = true;
}
else
{
inst.needsReload = true;
}
spills.push_back(spill);
freeReg(inst.regX64);
inst.regX64 = noreg;
}
void IrRegAllocX64::restore(IrInst& inst, bool intoOriginalLocation)
{
uint32_t instIdx = function.getInstIndex(inst);
for (size_t i = 0; i < spills.size(); i++)
{
if (spills[i].instIdx == instIdx)
{
RegisterX64 reg = intoOriginalLocation ? takeReg(spills[i].originalLoc, instIdx) : allocReg(spills[i].originalLoc.size, instIdx);
OperandX64 restoreLocation = noreg;
// Previous call might have relocated the spill vector, so this reference can't be taken earlier
const IrSpillX64& spill = spills[i];
if (spill.stackSlot != kNoStackSlot)
{
restoreLocation = addr[sSpillArea + spill.stackSlot * 8];
restoreLocation.memSize = reg.size;
usedSpillSlots.set(spill.stackSlot, false);
if (spill.valueKind == IrValueKind::Tvalue)
usedSpillSlots.set(spill.stackSlot + 1, false);
}
else
{
restoreLocation = getRestoreAddress(inst, getRestoreOp(inst));
}
if (spill.valueKind == IrValueKind::Tvalue)
build.vmovups(reg, restoreLocation);
else if (spill.valueKind == IrValueKind::Double)
build.vmovsd(reg, restoreLocation);
else
build.mov(reg, restoreLocation);
inst.regX64 = reg;
inst.spilled = false;
inst.needsReload = false;
spills[i] = spills.back();
spills.pop_back();
return;
}
}
}
void IrRegAllocX64::preserveAndFreeInstValues()
{
for (uint32_t instIdx : gprInstUsers)
{
if (instIdx != kInvalidInstIdx)
preserve(function.instructions[instIdx]);
}
for (uint32_t instIdx : xmmInstUsers)
{
if (instIdx != kInvalidInstIdx)
preserve(function.instructions[instIdx]);
}
}
bool IrRegAllocX64::shouldFreeGpr(RegisterX64 reg) const
{
if (reg == noreg)
return false;
LUAU_ASSERT(reg.size != SizeX64::xmmword);
for (RegisterX64 gpr : kGprAllocOrder)
{
if (reg.index == gpr.index)
return true;
}
return false;
}
unsigned IrRegAllocX64::findSpillStackSlot(IrValueKind valueKind)
{
// Find a free stack slot. A 16-byte TValue needs two consecutive slots, so the scan stops one slot short of the end ('- 1' below)
for (unsigned i = 0; i < unsigned(usedSpillSlots.size() - 1); ++i)
{
if (usedSpillSlots.test(i))
continue;
if (valueKind == IrValueKind::Tvalue && usedSpillSlots.test(i + 1))
{
++i; // No need to retest this double position
continue;
}
return i;
}
LUAU_ASSERT(!"Nowhere to spill");
return ~0u;
}
IrOp IrRegAllocX64::getRestoreOp(const IrInst& inst) const
{
if (IrOp location = function.findRestoreOp(inst); location.kind == IrOpKind::VmReg || location.kind == IrOpKind::VmConst)
return location;
return IrOp();
}
bool IrRegAllocX64::hasRestoreOp(const IrInst& inst) const
{
return getRestoreOp(inst).kind != IrOpKind::None;
}
OperandX64 IrRegAllocX64::getRestoreAddress(const IrInst& inst, IrOp restoreOp)
{
switch (getCmdValueKind(inst.cmd))
{
case IrValueKind::Unknown:
case IrValueKind::None:
LUAU_ASSERT(!"Invalid operand restore value kind");
break;
case IrValueKind::Tag:
return restoreOp.kind == IrOpKind::VmReg ? luauRegTag(vmRegOp(restoreOp)) : luauConstantTag(vmConstOp(restoreOp));
case IrValueKind::Int:
LUAU_ASSERT(restoreOp.kind == IrOpKind::VmReg);
return luauRegValueInt(vmRegOp(restoreOp));
case IrValueKind::Pointer:
return restoreOp.kind == IrOpKind::VmReg ? luauRegValue(vmRegOp(restoreOp)) : luauConstantValue(vmConstOp(restoreOp));
case IrValueKind::Double:
return restoreOp.kind == IrOpKind::VmReg ? luauRegValue(vmRegOp(restoreOp)) : luauConstantValue(vmConstOp(restoreOp));
case IrValueKind::Tvalue:
return restoreOp.kind == IrOpKind::VmReg ? luauReg(vmRegOp(restoreOp)) : luauConstant(vmConstOp(restoreOp));
}
LUAU_ASSERT(!"Failed to find restore operand location");
return noreg;
}
uint32_t IrRegAllocX64::findInstructionWithFurthestNextUse(const std::array<uint32_t, 16>& regInstUsers) const
{
uint32_t furthestUseTarget = kInvalidInstIdx;
uint32_t furthestUseLocation = 0;
for (uint32_t regInstUser : regInstUsers)
{
// Cannot spill temporary registers or the register of the value that's defined in the current instruction
if (regInstUser == kInvalidInstIdx || regInstUser == currInstIdx)
continue;
uint32_t nextUse = getNextInstUse(function, regInstUser, currInstIdx);
// Cannot spill value that is about to be used in the current instruction
if (nextUse == currInstIdx)
continue;
if (furthestUseTarget == kInvalidInstIdx || nextUse > furthestUseLocation)
{
furthestUseLocation = nextUse;
furthestUseTarget = regInstUser;
}
}
return furthestUseTarget;
}
void IrRegAllocX64::assertFree(RegisterX64 reg) const
{
if (reg.size == SizeX64::xmmword)
LUAU_ASSERT(freeXmmMap[reg.index]);
else
LUAU_ASSERT(freeGprMap[reg.index]);
}
void IrRegAllocX64::assertAllFree() const
{
for (RegisterX64 reg : kGprAllocOrder)
LUAU_ASSERT(freeGprMap[reg.index]);
for (bool free : freeXmmMap)
LUAU_ASSERT(free);
}
void IrRegAllocX64::assertNoSpills() const
{
LUAU_ASSERT(spills.empty());
}
ScopedRegX64::ScopedRegX64(IrRegAllocX64& owner)
: owner(owner)
, reg(noreg)
{
}
ScopedRegX64::ScopedRegX64(IrRegAllocX64& owner, SizeX64 size)
: owner(owner)
, reg(noreg)
{
alloc(size);
}
ScopedRegX64::ScopedRegX64(IrRegAllocX64& owner, RegisterX64 reg)
: owner(owner)
, reg(reg)
{
}
ScopedRegX64::~ScopedRegX64()
{
if (reg != noreg)
owner.freeReg(reg);
}
void ScopedRegX64::alloc(SizeX64 size)
{
LUAU_ASSERT(reg == noreg);
reg = owner.allocReg(size, kInvalidInstIdx);
}
void ScopedRegX64::free()
{
LUAU_ASSERT(reg != noreg);
owner.freeReg(reg);
reg = noreg;
}
RegisterX64 ScopedRegX64::release()
{
RegisterX64 tmp = reg;
reg = noreg;
return tmp;
}
ScopedSpills::ScopedSpills(IrRegAllocX64& owner)
: owner(owner)
{
startSpillId = owner.nextSpillId;
}
ScopedSpills::~ScopedSpills()
{
unsigned endSpillId = owner.nextSpillId;
for (size_t i = 0; i < owner.spills.size();)
{
IrSpillX64& spill = owner.spills[i];
// Restoring spills inside this scope cannot create new spills
LUAU_ASSERT(spill.spillId < endSpillId);
// If spill was created inside current scope, it has to be restored
if (spill.spillId >= startSpillId)
{
IrInst& inst = owner.function.instructions[spill.instIdx];
owner.restore(inst, /*intoOriginalLocation*/ true);
// Spill restore removes the spill entry, so loop is repeated at the same 'i'
}
else
{
i++;
}
}
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau
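// RAII usage sketch for the scoped helpers above: the temporary register is
// freed when the scope closes, and ScopedSpills restores spills created
// inside it. rBase is the VM stack base register from EmitCommonX64.
#include "Luau/IrRegAllocX64.h"
#include "EmitCommonX64.h"

void scopedTempExample(Luau::CodeGen::X64::IrRegAllocX64& regs, Luau::CodeGen::X64::AssemblyBuilderX64& build)
{
    using namespace Luau::CodeGen::X64;

    ScopedSpills spillGuard(regs);

    ScopedRegX64 tmp{regs, SizeX64::qword};
    build.mov(tmp.reg, qword[rBase]); // read R0's value field, for illustration
} // tmp is freed here; any spills from this scope have been restored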


@ -1,827 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrTranslateBuiltins.h"
#include "Luau/Bytecode.h"
#include "Luau/IrBuilder.h"
#include "lstate.h"
#include <math.h>
// TODO: when nresults is less than our actual result count, we can skip computing/writing unused results
static const int kMinMaxUnrolledParams = 5;
static const int kBit32BinaryOpUnrolledParams = 5;
namespace Luau
{
namespace CodeGen
{
static void builtinCheckDouble(IrBuilder& build, IrOp arg, IrOp fallback)
{
if (arg.kind == IrOpKind::Constant)
LUAU_ASSERT(build.function.constOp(arg).kind == IrConstKind::Double);
else
build.loadAndCheckTag(arg, LUA_TNUMBER, fallback);
}
static IrOp builtinLoadDouble(IrBuilder& build, IrOp arg)
{
if (arg.kind == IrOpKind::Constant)
return arg;
return build.inst(IrCmd::LOAD_DOUBLE, arg);
}
// Wrapper code for all builtins with a fixed signature and manual assembly lowering of the body
// (number, ...) -> number
static BuiltinImplResult translateBuiltinNumberToNumber(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
build.inst(IrCmd::FASTCALL, build.constUint(bfid), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(1));
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinNumberToNumberLibm(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(bfid), va);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltin2NumberToNumberLibm(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(bfid), va, vb);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinMathLdexp(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
IrOp vbi = build.inst(IrCmd::NUM_TO_INT, vb);
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(bfid), va, vbi);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
// (number, ...) -> (number, number)
static BuiltinImplResult translateBuiltinNumberTo2Number(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 2)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
build.inst(
IrCmd::FASTCALL, build.constUint(bfid), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(nresults == 1 ? 1 : 2));
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
if (nresults != 1)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 2};
}
static BuiltinImplResult translateBuiltinAssert(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults != 0)
return {BuiltinImplType::None, -1};
IrOp cont = build.block(IrBlockKind::Internal);
// TODO: maybe adding a guard like CHECK_TRUTHY can be useful
build.inst(IrCmd::JUMP_IF_FALSY, build.vmReg(arg), fallback, cont);
build.beginBlock(cont);
return {BuiltinImplType::UsesFallback, 0};
}
static BuiltinImplResult translateBuiltinMathDeg(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
const double rpd = (3.14159265358979323846 / 180.0);
IrOp varg = builtinLoadDouble(build, build.vmReg(arg));
IrOp value = build.inst(IrCmd::DIV_NUM, varg, build.constDouble(rpd));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinMathRad(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
const double rpd = (3.14159265358979323846 / 180.0);
IrOp varg = builtinLoadDouble(build, build.vmReg(arg));
IrOp value = build.inst(IrCmd::MUL_NUM, varg, build.constDouble(rpd));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinMathLog(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
int libmId = bfid;
std::optional<double> denom;
if (nparams != 1)
{
std::optional<double> y = build.function.asDoubleOp(args);
if (!y)
return {BuiltinImplType::None, -1};
if (*y == 2.0)
libmId = LBF_IR_MATH_LOG2;
else if (*y == 10.0)
libmId = LBF_MATH_LOG10;
else
denom = log(*y);
}
builtinCheckDouble(build, build.vmReg(arg), fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(libmId), va);
if (denom)
res = build.inst(IrCmd::DIV_NUM, res, build.constDouble(*denom));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
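// The base specialization above follows the change-of-base identity log_b(x) = log(x) / log(b):
// a constant base of 2 or 10 is routed to a dedicated libm entry (LBF_IR_MATH_LOG2 / LBF_MATH_LOG10),
// while any other constant base computes log(x) once and divides it by the precomputed log(b).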
static BuiltinImplResult translateBuiltinMathMin(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nparams > kMinMaxUnrolledParams || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
for (int i = 3; i <= nparams; ++i)
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), fallback);
IrOp varg1 = builtinLoadDouble(build, build.vmReg(arg));
IrOp varg2 = builtinLoadDouble(build, args);
IrOp res = build.inst(IrCmd::MIN_NUM, varg2, varg1); // Swapped arguments are required for consistency with VM builtins
for (int i = 3; i <= nparams; ++i)
{
IrOp arg = builtinLoadDouble(build, build.vmReg(vmRegOp(args) + (i - 2)));
res = build.inst(IrCmd::MIN_NUM, arg, res);
}
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinMathMax(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nparams > kMinMaxUnrolledParams || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
for (int i = 3; i <= nparams; ++i)
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), fallback);
IrOp varg1 = builtinLoadDouble(build, build.vmReg(arg));
IrOp varg2 = builtinLoadDouble(build, args);
IrOp res = build.inst(IrCmd::MAX_NUM, varg2, varg1); // Swapped arguments are required for consistency with VM builtins
for (int i = 3; i <= nparams; ++i)
{
IrOp arg = builtinLoadDouble(build, build.vmReg(vmRegOp(args) + (i - 2)));
res = build.inst(IrCmd::MAX_NUM, arg, res);
}
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinMathClamp(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 3 || nresults > 1)
return {BuiltinImplType::None, -1};
IrOp block = build.block(IrBlockKind::Internal);
LUAU_ASSERT(args.kind == IrOpKind::VmReg);
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + 1), fallback);
IrOp min = builtinLoadDouble(build, args);
IrOp max = builtinLoadDouble(build, build.vmReg(vmRegOp(args) + 1));
build.inst(IrCmd::JUMP_CMP_NUM, min, max, build.cond(IrCondition::NotLessEqual), fallback, block);
build.beginBlock(block);
IrOp v = builtinLoadDouble(build, build.vmReg(arg));
IrOp r = build.inst(IrCmd::MAX_NUM, min, v);
IrOp clamped = build.inst(IrCmd::MIN_NUM, max, r);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), clamped);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinMathUnary(IrBuilder& build, IrCmd cmd, int nparams, int ra, int arg, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
IrOp varg = builtinLoadDouble(build, build.vmReg(arg));
IrOp result = build.inst(cmd, varg);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), result);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinType(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
build.inst(IrCmd::FASTCALL, build.constUint(LBF_TYPE), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(1));
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinTypeof(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
build.inst(IrCmd::FASTCALL, build.constUint(LBF_TYPEOF), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(1));
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinBit32BinaryOp(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nparams > kBit32BinaryOpUnrolledParams || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
for (int i = 3; i <= nparams; ++i)
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
IrOp vbui = build.inst(IrCmd::NUM_TO_UINT, vb);
IrCmd cmd = IrCmd::NOP;
if (bfid == LBF_BIT32_BAND || bfid == LBF_BIT32_BTEST)
cmd = IrCmd::BITAND_UINT;
else if (bfid == LBF_BIT32_BXOR)
cmd = IrCmd::BITXOR_UINT;
else if (bfid == LBF_BIT32_BOR)
cmd = IrCmd::BITOR_UINT;
LUAU_ASSERT(cmd != IrCmd::NOP);
IrOp res = build.inst(cmd, vaui, vbui);
for (int i = 3; i <= nparams; ++i)
{
IrOp vc = builtinLoadDouble(build, build.vmReg(vmRegOp(args) + (i - 2)));
IrOp arg = build.inst(IrCmd::NUM_TO_UINT, vc);
res = build.inst(cmd, res, arg);
}
if (bfid == LBF_BIT32_BTEST)
{
IrOp falsey = build.block(IrBlockKind::Internal);
IrOp truthy = build.block(IrBlockKind::Internal);
IrOp exit = build.block(IrBlockKind::Internal);
build.inst(IrCmd::JUMP_EQ_INT, res, build.constInt(0), falsey, truthy);
build.beginBlock(falsey);
build.inst(IrCmd::STORE_INT, build.vmReg(ra), build.constInt(0));
build.inst(IrCmd::JUMP, exit);
build.beginBlock(truthy);
build.inst(IrCmd::STORE_INT, build.vmReg(ra), build.constInt(1));
build.inst(IrCmd::JUMP, exit);
build.beginBlock(exit);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TBOOLEAN));
}
else
{
IrOp value = build.inst(IrCmd::UINT_TO_NUM, res);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinBit32Bnot(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
IrOp not_ = build.inst(IrCmd::BITNOT_UINT, vaui);
IrOp value = build.inst(IrCmd::UINT_TO_NUM, not_);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinBit32Shift(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
IrOp block = build.block(IrBlockKind::Internal);
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
IrOp vbi = build.inst(IrCmd::NUM_TO_INT, vb);
build.inst(IrCmd::JUMP_GE_UINT, vbi, build.constInt(32), fallback, block);
build.beginBlock(block);
IrCmd cmd = IrCmd::NOP;
if (bfid == LBF_BIT32_LSHIFT)
cmd = IrCmd::BITLSHIFT_UINT;
else if (bfid == LBF_BIT32_RSHIFT)
cmd = IrCmd::BITRSHIFT_UINT;
else if (bfid == LBF_BIT32_ARSHIFT)
cmd = IrCmd::BITARSHIFT_UINT;
LUAU_ASSERT(cmd != IrCmd::NOP);
IrOp shift = build.inst(cmd, vaui, vbi);
IrOp value = build.inst(IrCmd::UINT_TO_NUM, shift);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinBit32Rotate(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
IrOp vbi = build.inst(IrCmd::NUM_TO_INT, vb);
IrCmd cmd = (bfid == LBF_BIT32_LROTATE) ? IrCmd::BITLROTATE_UINT : IrCmd::BITRROTATE_UINT;
IrOp shift = build.inst(cmd, vaui, vbi);
IrOp value = build.inst(IrCmd::UINT_TO_NUM, shift);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinBit32Extract(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
IrOp n = build.inst(IrCmd::NUM_TO_UINT, va);
IrOp f = build.inst(IrCmd::NUM_TO_INT, vb);
IrOp value;
if (nparams == 2)
{
IrOp block = build.block(IrBlockKind::Internal);
build.inst(IrCmd::JUMP_GE_UINT, f, build.constInt(32), fallback, block);
build.beginBlock(block);
// TODO: this can be optimized using a bit-select instruction (bt on x86)
IrOp shift = build.inst(IrCmd::BITRSHIFT_UINT, n, f);
value = build.inst(IrCmd::BITAND_UINT, shift, build.constInt(1));
}
else
{
builtinCheckDouble(build, build.vmReg(args.index + 1), fallback);
IrOp vc = builtinLoadDouble(build, build.vmReg(args.index + 1));
IrOp w = build.inst(IrCmd::NUM_TO_INT, vc);
IrOp block1 = build.block(IrBlockKind::Internal);
build.inst(IrCmd::JUMP_LT_INT, f, build.constInt(0), fallback, block1);
build.beginBlock(block1);
IrOp block2 = build.block(IrBlockKind::Internal);
build.inst(IrCmd::JUMP_LT_INT, w, build.constInt(1), fallback, block2);
build.beginBlock(block2);
IrOp block3 = build.block(IrBlockKind::Internal);
IrOp fw = build.inst(IrCmd::ADD_INT, f, w);
build.inst(IrCmd::JUMP_LT_INT, fw, build.constInt(33), block3, fallback);
build.beginBlock(block3);
IrOp shift = build.inst(IrCmd::BITLSHIFT_UINT, build.constInt(0xfffffffe), build.inst(IrCmd::SUB_INT, w, build.constInt(1)));
IrOp m = build.inst(IrCmd::BITNOT_UINT, shift);
IrOp nf = build.inst(IrCmd::BITRSHIFT_UINT, n, f);
value = build.inst(IrCmd::BITAND_UINT, nf, m);
}
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.inst(IrCmd::UINT_TO_NUM, value));
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinBit32ExtractK(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp n = build.inst(IrCmd::NUM_TO_UINT, va);
double a2 = build.function.doubleOp(args);
int fw = int(a2);
int f = fw & 31;
int w1 = fw >> 5;
uint32_t m = ~(0xfffffffeu << w1);
IrOp nf = build.inst(IrCmd::BITRSHIFT_UINT, n, build.constInt(f));
IrOp and_ = build.inst(IrCmd::BITAND_UINT, nf, build.constInt(m));
IrOp value = build.inst(IrCmd::UINT_TO_NUM, and_);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
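// Worked example of the constant-field decoding above: the code implies fw packs the field as
// f | ((w - 1) << 5). For fw = 231: f = 231 & 31 = 7 (start bit), w1 = 231 >> 5 = 7 (width 8),
// m = ~(0xfffffffeu << 7) = 0x000000ff, so the stored result is (n >> 7) & 0xff.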
static BuiltinImplResult translateBuiltinBit32Countz(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
IrCmd cmd = (bfid == LBF_BIT32_COUNTLZ) ? IrCmd::BITCOUNTLZ_UINT : IrCmd::BITCOUNTRZ_UINT;
IrOp bin = build.inst(cmd, vaui);
IrOp value = build.inst(IrCmd::UINT_TO_NUM, bin);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinBit32Replace(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 3 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(args.index + 1), fallback);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
IrOp vc = builtinLoadDouble(build, build.vmReg(args.index + 1));
IrOp n = build.inst(IrCmd::NUM_TO_UINT, va);
IrOp v = build.inst(IrCmd::NUM_TO_UINT, vb);
IrOp f = build.inst(IrCmd::NUM_TO_INT, vc);
IrOp value;
if (nparams == 3)
{
IrOp block = build.block(IrBlockKind::Internal);
build.inst(IrCmd::JUMP_GE_UINT, f, build.constInt(32), fallback, block);
build.beginBlock(block);
// TODO: this can be optimized using a bit-select instruction (btr on x86)
IrOp m = build.constInt(1);
IrOp shift = build.inst(IrCmd::BITLSHIFT_UINT, m, f);
IrOp not_ = build.inst(IrCmd::BITNOT_UINT, shift);
IrOp lhs = build.inst(IrCmd::BITAND_UINT, n, not_);
IrOp vm = build.inst(IrCmd::BITAND_UINT, v, m);
IrOp rhs = build.inst(IrCmd::BITLSHIFT_UINT, vm, f);
value = build.inst(IrCmd::BITOR_UINT, lhs, rhs);
}
else
{
builtinCheckDouble(build, build.vmReg(args.index + 2), fallback);
IrOp vd = builtinLoadDouble(build, build.vmReg(args.index + 2));
IrOp w = build.inst(IrCmd::NUM_TO_INT, vd);
IrOp block1 = build.block(IrBlockKind::Internal);
build.inst(IrCmd::JUMP_LT_INT, f, build.constInt(0), fallback, block1);
build.beginBlock(block1);
IrOp block2 = build.block(IrBlockKind::Internal);
build.inst(IrCmd::JUMP_LT_INT, w, build.constInt(1), fallback, block2);
build.beginBlock(block2);
IrOp block3 = build.block(IrBlockKind::Internal);
IrOp fw = build.inst(IrCmd::ADD_INT, f, w);
build.inst(IrCmd::JUMP_LT_INT, fw, build.constInt(33), block3, fallback);
build.beginBlock(block3);
IrOp shift1 = build.inst(IrCmd::BITLSHIFT_UINT, build.constInt(0xfffffffe), build.inst(IrCmd::SUB_INT, w, build.constInt(1)));
IrOp m = build.inst(IrCmd::BITNOT_UINT, shift1);
IrOp shift2 = build.inst(IrCmd::BITLSHIFT_UINT, m, f);
IrOp not_ = build.inst(IrCmd::BITNOT_UINT, shift2);
IrOp lhs = build.inst(IrCmd::BITAND_UINT, n, not_);
IrOp vm = build.inst(IrCmd::BITAND_UINT, v, m);
IrOp rhs = build.inst(IrCmd::BITLSHIFT_UINT, vm, f);
value = build.inst(IrCmd::BITOR_UINT, lhs, rhs);
}
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.inst(IrCmd::UINT_TO_NUM, value));
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 3 || nresults > 1)
return {BuiltinImplType::None, -1};
LUAU_ASSERT(LUA_VECTOR_SIZE == 3);
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + 1), fallback);
IrOp x = builtinLoadDouble(build, build.vmReg(arg));
IrOp y = builtinLoadDouble(build, args);
IrOp z = builtinLoadDouble(build, build.vmReg(vmRegOp(args) + 1));
build.inst(IrCmd::STORE_VECTOR, build.vmReg(ra), x, y, z);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TVECTOR));
return {BuiltinImplType::UsesFallback, 1};
}
BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback)
{
// Builtins are not allowed to handle variadic arguments
if (nparams == LUA_MULTRET)
return {BuiltinImplType::None, -1};
switch (bfid)
{
case LBF_ASSERT:
return translateBuiltinAssert(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_DEG:
return translateBuiltinMathDeg(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_RAD:
return translateBuiltinMathRad(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_LOG:
return translateBuiltinMathLog(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_MIN:
return translateBuiltinMathMin(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_MAX:
return translateBuiltinMathMax(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_CLAMP:
return translateBuiltinMathClamp(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_FLOOR:
return translateBuiltinMathUnary(build, IrCmd::FLOOR_NUM, nparams, ra, arg, nresults, fallback);
case LBF_MATH_CEIL:
return translateBuiltinMathUnary(build, IrCmd::CEIL_NUM, nparams, ra, arg, nresults, fallback);
case LBF_MATH_SQRT:
return translateBuiltinMathUnary(build, IrCmd::SQRT_NUM, nparams, ra, arg, nresults, fallback);
case LBF_MATH_ABS:
return translateBuiltinMathUnary(build, IrCmd::ABS_NUM, nparams, ra, arg, nresults, fallback);
case LBF_MATH_ROUND:
return translateBuiltinMathUnary(build, IrCmd::ROUND_NUM, nparams, ra, arg, nresults, fallback);
case LBF_MATH_EXP:
case LBF_MATH_ASIN:
case LBF_MATH_SIN:
case LBF_MATH_SINH:
case LBF_MATH_ACOS:
case LBF_MATH_COS:
case LBF_MATH_COSH:
case LBF_MATH_ATAN:
case LBF_MATH_TAN:
case LBF_MATH_TANH:
case LBF_MATH_LOG10:
return translateBuiltinNumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_SIGN:
return translateBuiltinNumberToNumber(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_POW:
case LBF_MATH_FMOD:
case LBF_MATH_ATAN2:
return translateBuiltin2NumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_LDEXP:
return translateBuiltinMathLdexp(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_FREXP:
case LBF_MATH_MODF:
return translateBuiltinNumberTo2Number(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_BAND:
case LBF_BIT32_BOR:
case LBF_BIT32_BXOR:
case LBF_BIT32_BTEST:
return translateBuiltinBit32BinaryOp(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_BNOT:
return translateBuiltinBit32Bnot(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_LSHIFT:
case LBF_BIT32_RSHIFT:
case LBF_BIT32_ARSHIFT:
return translateBuiltinBit32Shift(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_LROTATE:
case LBF_BIT32_RROTATE:
return translateBuiltinBit32Rotate(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_EXTRACT:
return translateBuiltinBit32Extract(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_EXTRACTK:
return translateBuiltinBit32ExtractK(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_COUNTLZ:
case LBF_BIT32_COUNTRZ:
return translateBuiltinBit32Countz(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_REPLACE:
return translateBuiltinBit32Replace(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_TYPE:
return translateBuiltinType(build, nparams, ra, arg, args, nresults, fallback);
case LBF_TYPEOF:
return translateBuiltinTypeof(build, nparams, ra, arg, args, nresults, fallback);
case LBF_VECTOR:
return translateBuiltinVector(build, nparams, ra, arg, args, nresults, fallback);
default:
return {BuiltinImplType::None, -1};
}
}
} // namespace CodeGen
} // namespace Luau
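The header below defines the result contract for this translator. A hypothetical call site (all surrounding names are assumptions for illustration) would look like:

BuiltinImplResult br = translateBuiltin(build, bfid, ra, arg, args, nparams, nresults, fallback);
if (br.type == BuiltinImplType::None)
{
    // not lowered inline; emit the generic fastcall/call sequence instead
}
else
{
    // lowered inline, producing br.actualResultCount results; 'fallback' is taken on type mismatches
}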

View File

@ -1,27 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
struct IrBuilder;
struct IrOp;
enum class BuiltinImplType
{
None,
UsesFallback, // Uses fallback for unsupported cases
};
struct BuiltinImplResult
{
BuiltinImplType type;
int actualResultCount;
};
BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback);
} // namespace CodeGen
} // namespace Luau

File diff suppressed because it is too large Load Diff

View File

@ -1,103 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
#include <stdint.h>
#include "ltm.h"
typedef uint32_t Instruction;
namespace Luau
{
namespace CodeGen
{
enum class IrCondition : uint8_t;
struct IrOp;
struct IrBuilder;
enum class IrCmd : uint8_t;
void translateInstLoadNil(IrBuilder& build, const Instruction* pc);
void translateInstLoadB(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstLoadN(IrBuilder& build, const Instruction* pc);
void translateInstLoadK(IrBuilder& build, const Instruction* pc);
void translateInstLoadKX(IrBuilder& build, const Instruction* pc);
void translateInstMove(IrBuilder& build, const Instruction* pc);
void translateInstJump(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstJumpBack(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstJumpIf(IrBuilder& build, const Instruction* pc, int pcpos, bool not_);
void translateInstJumpIfEq(IrBuilder& build, const Instruction* pc, int pcpos, bool not_);
void translateInstJumpIfCond(IrBuilder& build, const Instruction* pc, int pcpos, IrCondition cond);
void translateInstJumpX(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstJumpxEqNil(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstJumpxEqB(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstJumpxEqN(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstJumpxEqS(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstBinary(IrBuilder& build, const Instruction* pc, int pcpos, TMS tm);
void translateInstBinaryK(IrBuilder& build, const Instruction* pc, int pcpos, TMS tm);
void translateInstNot(IrBuilder& build, const Instruction* pc);
void translateInstMinus(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstLength(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstNewTable(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstDupTable(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstGetUpval(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstSetUpval(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstCloseUpvals(IrBuilder& build, const Instruction* pc);
void translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool customParams, int customParamCount, IrOp customArgs, IrOp next);
void translateInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstForNLoop(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstForGPrepNext(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstForGPrepInext(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstForGLoopIpairs(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstGetTableN(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstSetTableN(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstGetTable(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstSetTable(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstGetImport(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstGetTableKS(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstSetTableKS(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstGetGlobal(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstSetGlobal(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstConcat(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstCapture(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstAndX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c);
void translateInstOrX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c);
inline int getOpLength(LuauOpcode op)
{
switch (op)
{
case LOP_GETGLOBAL:
case LOP_SETGLOBAL:
case LOP_GETIMPORT:
case LOP_GETTABLEKS:
case LOP_SETTABLEKS:
case LOP_NAMECALL:
case LOP_JUMPIFEQ:
case LOP_JUMPIFLE:
case LOP_JUMPIFLT:
case LOP_JUMPIFNOTEQ:
case LOP_JUMPIFNOTLE:
case LOP_JUMPIFNOTLT:
case LOP_NEWTABLE:
case LOP_SETLIST:
case LOP_FORGLOOP:
case LOP_LOADKX:
case LOP_FASTCALL2:
case LOP_FASTCALL2K:
case LOP_JUMPXEQKNIL:
case LOP_JUMPXEQKB:
case LOP_JUMPXEQKN:
case LOP_JUMPXEQKS:
return 2;
default:
return 1;
}
}
} // namespace CodeGen
} // namespace Luau
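getOpLength gives each opcode's footprint in words, which is what a translator needs to step through a bytecode stream. A sketch of such a loop; the Proto layout is taken from the VM and assumed here for illustration:

for (const Instruction* pc = proto->code; pc < proto->code + proto->sizecode;)
{
    LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));
    // translate the instruction at pc...
    pc += getOpLength(op); // two-word opcodes carry an AUX word that must be skipped
}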

View File

@ -1,795 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrUtils.h"
#include "Luau/IrBuilder.h"
#include "BitUtils.h"
#include "NativeState.h"
#include "lua.h"
#include "lnumutils.h"
#include <limits.h>
#include <math.h>
namespace Luau
{
namespace CodeGen
{
IrValueKind getCmdValueKind(IrCmd cmd)
{
switch (cmd)
{
case IrCmd::NOP:
return IrValueKind::None;
case IrCmd::LOAD_TAG:
return IrValueKind::Tag;
case IrCmd::LOAD_POINTER:
return IrValueKind::Pointer;
case IrCmd::LOAD_DOUBLE:
return IrValueKind::Double;
case IrCmd::LOAD_INT:
return IrValueKind::Int;
case IrCmd::LOAD_TVALUE:
case IrCmd::LOAD_NODE_VALUE_TV:
return IrValueKind::Tvalue;
case IrCmd::LOAD_ENV:
case IrCmd::GET_ARR_ADDR:
case IrCmd::GET_SLOT_NODE_ADDR:
case IrCmd::GET_HASH_NODE_ADDR:
return IrValueKind::Pointer;
case IrCmd::STORE_TAG:
case IrCmd::STORE_POINTER:
case IrCmd::STORE_DOUBLE:
case IrCmd::STORE_INT:
case IrCmd::STORE_VECTOR:
case IrCmd::STORE_TVALUE:
case IrCmd::STORE_NODE_VALUE_TV:
return IrValueKind::None;
case IrCmd::ADD_INT:
case IrCmd::SUB_INT:
return IrValueKind::Int;
case IrCmd::ADD_NUM:
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:
case IrCmd::UNM_NUM:
case IrCmd::FLOOR_NUM:
case IrCmd::CEIL_NUM:
case IrCmd::ROUND_NUM:
case IrCmd::SQRT_NUM:
case IrCmd::ABS_NUM:
return IrValueKind::Double;
case IrCmd::NOT_ANY:
return IrValueKind::Int;
case IrCmd::JUMP:
case IrCmd::JUMP_IF_TRUTHY:
case IrCmd::JUMP_IF_FALSY:
case IrCmd::JUMP_EQ_TAG:
case IrCmd::JUMP_EQ_INT:
case IrCmd::JUMP_LT_INT:
case IrCmd::JUMP_GE_UINT:
case IrCmd::JUMP_EQ_POINTER:
case IrCmd::JUMP_CMP_NUM:
case IrCmd::JUMP_CMP_ANY:
case IrCmd::JUMP_SLOT_MATCH:
return IrValueKind::None;
case IrCmd::TABLE_LEN:
return IrValueKind::Double;
case IrCmd::NEW_TABLE:
case IrCmd::DUP_TABLE:
return IrValueKind::Pointer;
case IrCmd::TRY_NUM_TO_INDEX:
return IrValueKind::Int;
case IrCmd::TRY_CALL_FASTGETTM:
return IrValueKind::Pointer;
case IrCmd::INT_TO_NUM:
case IrCmd::UINT_TO_NUM:
return IrValueKind::Double;
case IrCmd::NUM_TO_INT:
case IrCmd::NUM_TO_UINT:
return IrValueKind::Int;
case IrCmd::ADJUST_STACK_TO_REG:
case IrCmd::ADJUST_STACK_TO_TOP:
return IrValueKind::None;
case IrCmd::FASTCALL:
return IrValueKind::None;
case IrCmd::INVOKE_FASTCALL:
return IrValueKind::Int;
case IrCmd::CHECK_FASTCALL_RES:
case IrCmd::DO_ARITH:
case IrCmd::DO_LEN:
case IrCmd::GET_TABLE:
case IrCmd::SET_TABLE:
case IrCmd::GET_IMPORT:
case IrCmd::CONCAT:
case IrCmd::GET_UPVALUE:
case IrCmd::SET_UPVALUE:
case IrCmd::PREPARE_FORN:
case IrCmd::CHECK_TAG:
case IrCmd::CHECK_READONLY:
case IrCmd::CHECK_NO_METATABLE:
case IrCmd::CHECK_SAFE_ENV:
case IrCmd::CHECK_ARRAY_SIZE:
case IrCmd::CHECK_SLOT_MATCH:
case IrCmd::CHECK_NODE_NO_NEXT:
case IrCmd::INTERRUPT:
case IrCmd::CHECK_GC:
case IrCmd::BARRIER_OBJ:
case IrCmd::BARRIER_TABLE_BACK:
case IrCmd::BARRIER_TABLE_FORWARD:
case IrCmd::SET_SAVEDPC:
case IrCmd::CLOSE_UPVALS:
case IrCmd::CAPTURE:
case IrCmd::SETLIST:
case IrCmd::CALL:
case IrCmd::RETURN:
case IrCmd::FORGLOOP:
case IrCmd::FORGLOOP_FALLBACK:
case IrCmd::FORGPREP_XNEXT_FALLBACK:
case IrCmd::COVERAGE:
case IrCmd::FALLBACK_GETGLOBAL:
case IrCmd::FALLBACK_SETGLOBAL:
case IrCmd::FALLBACK_GETTABLEKS:
case IrCmd::FALLBACK_SETTABLEKS:
case IrCmd::FALLBACK_NAMECALL:
case IrCmd::FALLBACK_PREPVARARGS:
case IrCmd::FALLBACK_GETVARARGS:
case IrCmd::FALLBACK_NEWCLOSURE:
case IrCmd::FALLBACK_DUPCLOSURE:
case IrCmd::FALLBACK_FORGPREP:
return IrValueKind::None;
case IrCmd::SUBSTITUTE:
return IrValueKind::Unknown;
case IrCmd::BITAND_UINT:
case IrCmd::BITXOR_UINT:
case IrCmd::BITOR_UINT:
case IrCmd::BITNOT_UINT:
case IrCmd::BITLSHIFT_UINT:
case IrCmd::BITRSHIFT_UINT:
case IrCmd::BITARSHIFT_UINT:
case IrCmd::BITLROTATE_UINT:
case IrCmd::BITRROTATE_UINT:
case IrCmd::BITCOUNTLZ_UINT:
case IrCmd::BITCOUNTRZ_UINT:
return IrValueKind::Int;
case IrCmd::INVOKE_LIBM:
return IrValueKind::Double;
}
LUAU_UNREACHABLE();
}
static void removeInstUse(IrFunction& function, uint32_t instIdx)
{
IrInst& inst = function.instructions[instIdx];
LUAU_ASSERT(inst.useCount);
inst.useCount--;
if (inst.useCount == 0)
kill(function, inst);
}
static void removeBlockUse(IrFunction& function, uint32_t blockIdx)
{
IrBlock& block = function.blocks[blockIdx];
LUAU_ASSERT(block.useCount);
block.useCount--;
// Entry block is never removed because it has an implicit use
if (block.useCount == 0 && blockIdx != 0)
kill(function, block);
}
void addUse(IrFunction& function, IrOp op)
{
if (op.kind == IrOpKind::Inst)
function.instructions[op.index].useCount++;
else if (op.kind == IrOpKind::Block)
function.blocks[op.index].useCount++;
}
void removeUse(IrFunction& function, IrOp op)
{
if (op.kind == IrOpKind::Inst)
removeInstUse(function, op.index);
else if (op.kind == IrOpKind::Block)
removeBlockUse(function, op.index);
}
bool isGCO(uint8_t tag)
{
// mirrors iscollectable(o) from VM/lobject.h
return tag >= LUA_TSTRING;
}
void kill(IrFunction& function, IrInst& inst)
{
LUAU_ASSERT(inst.useCount == 0);
inst.cmd = IrCmd::NOP;
removeUse(function, inst.a);
removeUse(function, inst.b);
removeUse(function, inst.c);
removeUse(function, inst.d);
removeUse(function, inst.e);
removeUse(function, inst.f);
inst.a = {};
inst.b = {};
inst.c = {};
inst.d = {};
inst.e = {};
inst.f = {};
}
void kill(IrFunction& function, uint32_t start, uint32_t end)
{
// Kill instructions in reverse order to avoid killing instructions that are still marked as used
for (int i = int(end); i >= int(start); i--)
{
LUAU_ASSERT(unsigned(i) < function.instructions.size());
IrInst& curr = function.instructions[i];
if (curr.cmd == IrCmd::NOP)
continue;
kill(function, curr);
}
}
void kill(IrFunction& function, IrBlock& block)
{
LUAU_ASSERT(block.useCount == 0);
block.kind = IrBlockKind::Dead;
kill(function, block.start, block.finish);
block.start = ~0u;
block.finish = ~0u;
}
void replace(IrFunction& function, IrOp& original, IrOp replacement)
{
// Add the new use before removing the old one, in case the old use was the last one keeping the target operand alive
addUse(function, replacement);
removeUse(function, original);
original = replacement;
}
void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst replacement)
{
IrInst& inst = function.instructions[instIdx];
// Add the new uses before removing the old ones, in case those were the last uses keeping the target operands alive
addUse(function, replacement.a);
addUse(function, replacement.b);
addUse(function, replacement.c);
addUse(function, replacement.d);
addUse(function, replacement.e);
addUse(function, replacement.f);
// An extra reference is added so the block will not remove itself
block.useCount++;
// If we introduced an earlier terminating instruction, all following instructions become dead
if (!isBlockTerminator(inst.cmd) && isBlockTerminator(replacement.cmd))
{
// Block has to be fully constructed before the replacement is performed
LUAU_ASSERT(block.finish != ~0u);
LUAU_ASSERT(instIdx + 1 <= block.finish);
kill(function, instIdx + 1, block.finish);
block.finish = instIdx;
}
removeUse(function, inst.a);
removeUse(function, inst.b);
removeUse(function, inst.c);
removeUse(function, inst.d);
removeUse(function, inst.e);
removeUse(function, inst.f);
// Inherit existing use count (last use is skipped as it will be defined later)
replacement.useCount = inst.useCount;
inst = replacement;
// Removing the earlier extra reference might leave the block without users while not marking it as dead
// That case will have to be handled by a separate dead code elimination pass
block.useCount--;
}
void substitute(IrFunction& function, IrInst& inst, IrOp replacement)
{
LUAU_ASSERT(!isBlockTerminator(inst.cmd));
inst.cmd = IrCmd::SUBSTITUTE;
addUse(function, replacement);
removeUse(function, inst.a);
removeUse(function, inst.b);
removeUse(function, inst.c);
removeUse(function, inst.d);
removeUse(function, inst.e);
removeUse(function, inst.f);
inst.a = replacement;
inst.b = {};
inst.c = {};
inst.d = {};
inst.e = {};
inst.f = {};
}
void applySubstitutions(IrFunction& function, IrOp& op)
{
if (op.kind == IrOpKind::Inst)
{
IrInst& src = function.instructions[op.index];
if (src.cmd == IrCmd::SUBSTITUTE)
{
op.kind = src.a.kind;
op.index = src.a.index;
// If we substitute with the result of a different instruction, update the use count
if (op.kind == IrOpKind::Inst)
{
IrInst& dst = function.instructions[op.index];
LUAU_ASSERT(dst.cmd != IrCmd::SUBSTITUTE && "chained substitutions are not allowed");
dst.useCount++;
}
LUAU_ASSERT(src.useCount > 0);
src.useCount--;
if (src.useCount == 0)
{
src.cmd = IrCmd::NOP;
removeUse(function, src.a);
src.a = {};
}
}
}
}
void applySubstitutions(IrFunction& function, IrInst& inst)
{
applySubstitutions(function, inst.a);
applySubstitutions(function, inst.b);
applySubstitutions(function, inst.c);
applySubstitutions(function, inst.d);
applySubstitutions(function, inst.e);
applySubstitutions(function, inst.f);
}
bool compare(double a, double b, IrCondition cond)
{
switch (cond)
{
case IrCondition::Equal:
return a == b;
case IrCondition::NotEqual:
return a != b;
case IrCondition::Less:
return a < b;
case IrCondition::NotLess:
return !(a < b);
case IrCondition::LessEqual:
return a <= b;
case IrCondition::NotLessEqual:
return !(a <= b);
case IrCondition::Greater:
return a > b;
case IrCondition::NotGreater:
return !(a > b);
case IrCondition::GreaterEqual:
return a >= b;
case IrCondition::NotGreaterEqual:
return !(a >= b);
default:
LUAU_ASSERT(!"Unsupported condition");
}
return false;
}
void foldConstants(IrBuilder& build, IrFunction& function, IrBlock& block, uint32_t index)
{
IrInst& inst = function.instructions[index];
switch (inst.cmd)
{
case IrCmd::ADD_INT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
// We need to avoid signed integer overflow, but we also have to produce a result
// So we add numbers as unsigned and use fixed-width integer types to force a two's complement evaluation
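// For example, lhs = INT32_MAX and rhs = 1: the unsigned sum wraps to 0x80000000, which converts back to INT32_MIN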
int32_t lhs = function.intOp(inst.a);
int32_t rhs = function.intOp(inst.b);
int sum = int32_t(uint32_t(lhs) + uint32_t(rhs));
substitute(function, inst, build.constInt(sum));
}
break;
case IrCmd::SUB_INT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
// We need to avoid signed integer overflow, but we also have to produce a result
// So we subtract numbers as unsigned and use fixed-width integer types to force a two's complement evaluation
int32_t lhs = function.intOp(inst.a);
int32_t rhs = function.intOp(inst.b);
int sum = int32_t(uint32_t(lhs) - uint32_t(rhs));
substitute(function, inst, build.constInt(sum));
}
break;
case IrCmd::ADD_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(function.doubleOp(inst.a) + function.doubleOp(inst.b)));
break;
case IrCmd::SUB_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(function.doubleOp(inst.a) - function.doubleOp(inst.b)));
break;
case IrCmd::MUL_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(function.doubleOp(inst.a) * function.doubleOp(inst.b)));
break;
case IrCmd::DIV_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(function.doubleOp(inst.a) / function.doubleOp(inst.b)));
break;
case IrCmd::MOD_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(luai_nummod(function.doubleOp(inst.a), function.doubleOp(inst.b))));
break;
case IrCmd::MIN_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
double a1 = function.doubleOp(inst.a);
double a2 = function.doubleOp(inst.b);
substitute(function, inst, build.constDouble(a1 < a2 ? a1 : a2));
}
break;
case IrCmd::MAX_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
double a1 = function.doubleOp(inst.a);
double a2 = function.doubleOp(inst.b);
substitute(function, inst, build.constDouble(a1 > a2 ? a1 : a2));
}
break;
case IrCmd::UNM_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(-function.doubleOp(inst.a)));
break;
case IrCmd::FLOOR_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(floor(function.doubleOp(inst.a))));
break;
case IrCmd::CEIL_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(ceil(function.doubleOp(inst.a))));
break;
case IrCmd::ROUND_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(round(function.doubleOp(inst.a))));
break;
case IrCmd::SQRT_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(sqrt(function.doubleOp(inst.a))));
break;
case IrCmd::ABS_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(fabs(function.doubleOp(inst.a))));
break;
case IrCmd::NOT_ANY:
if (inst.a.kind == IrOpKind::Constant)
{
uint8_t a = function.tagOp(inst.a);
if (a == LUA_TNIL)
substitute(function, inst, build.constInt(1));
else if (a != LUA_TBOOLEAN)
substitute(function, inst, build.constInt(0));
else if (inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constInt(function.intOp(inst.b) == 1 ? 0 : 1));
}
break;
case IrCmd::JUMP_EQ_TAG:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (function.tagOp(inst.a) == function.tagOp(inst.b))
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
case IrCmd::JUMP_EQ_INT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (function.intOp(inst.a) == function.intOp(inst.b))
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
case IrCmd::JUMP_LT_INT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (function.intOp(inst.a) < function.intOp(inst.b))
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
case IrCmd::JUMP_GE_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (unsigned(function.intOp(inst.a)) >= unsigned(function.intOp(inst.b)))
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
case IrCmd::JUMP_CMP_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (compare(function.doubleOp(inst.a), function.doubleOp(inst.b), conditionOp(inst.c)))
replace(function, block, index, {IrCmd::JUMP, inst.d});
else
replace(function, block, index, {IrCmd::JUMP, inst.e});
}
break;
case IrCmd::TRY_NUM_TO_INDEX:
if (inst.a.kind == IrOpKind::Constant)
{
double value = function.doubleOp(inst.a);
// To avoid undefined behavior of casting a value not representable in the target type, we check the range
if (value >= INT_MIN && value <= INT_MAX)
{
int arrIndex = int(value);
if (double(arrIndex) == value)
substitute(function, inst, build.constInt(arrIndex));
else
replace(function, block, index, {IrCmd::JUMP, inst.b});
}
else
{
replace(function, block, index, {IrCmd::JUMP, inst.b});
}
}
break;
case IrCmd::INT_TO_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(double(function.intOp(inst.a))));
break;
case IrCmd::UINT_TO_NUM:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constDouble(double(unsigned(function.intOp(inst.a)))));
break;
case IrCmd::NUM_TO_INT:
if (inst.a.kind == IrOpKind::Constant)
{
double value = function.doubleOp(inst.a);
// To avoid undefined behavior of casting a value not representable in the target type, we check the range
if (value >= INT_MIN && value <= INT_MAX)
substitute(function, inst, build.constInt(int(value)));
}
break;
case IrCmd::NUM_TO_UINT:
if (inst.a.kind == IrOpKind::Constant)
{
double value = function.doubleOp(inst.a);
// To avoid undefined behavior of casting a value not representable in the target type, we check the range
if (value >= 0 && value <= UINT_MAX)
substitute(function, inst, build.constInt(unsigned(function.doubleOp(inst.a))));
}
break;
case IrCmd::CHECK_TAG:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (function.tagOp(inst.a) == function.tagOp(inst.b))
kill(function, inst);
else
replace(function, block, index, {IrCmd::JUMP, inst.c}); // Shows a conflict in assumptions on this path
}
break;
case IrCmd::BITAND_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
unsigned op1 = unsigned(function.intOp(inst.a));
unsigned op2 = unsigned(function.intOp(inst.b));
substitute(function, inst, build.constInt(op1 & op2));
}
else
{
if (inst.a.kind == IrOpKind::Constant && function.intOp(inst.a) == 0) // (0 & b) -> 0
substitute(function, inst, build.constInt(0));
else if (inst.a.kind == IrOpKind::Constant && function.intOp(inst.a) == -1) // (-1 & b) -> b
substitute(function, inst, inst.b);
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0) // (a & 0) -> 0
substitute(function, inst, build.constInt(0));
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == -1) // (a & -1) -> a
substitute(function, inst, inst.a);
}
break;
case IrCmd::BITXOR_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
unsigned op1 = unsigned(function.intOp(inst.a));
unsigned op2 = unsigned(function.intOp(inst.b));
substitute(function, inst, build.constInt(op1 ^ op2));
}
else
{
if (inst.a.kind == IrOpKind::Constant && function.intOp(inst.a) == 0) // (0 ^ b) -> b
substitute(function, inst, inst.b);
else if (inst.a.kind == IrOpKind::Constant && function.intOp(inst.a) == -1) // (-1 ^ b) -> ~b
replace(function, block, index, {IrCmd::BITNOT_UINT, inst.b});
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0) // (a ^ 0) -> a
substitute(function, inst, inst.a);
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == -1) // (a ^ -1) -> ~a
replace(function, block, index, {IrCmd::BITNOT_UINT, inst.a});
}
break;
case IrCmd::BITOR_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
unsigned op1 = unsigned(function.intOp(inst.a));
unsigned op2 = unsigned(function.intOp(inst.b));
substitute(function, inst, build.constInt(op1 | op2));
}
else
{
if (inst.a.kind == IrOpKind::Constant && function.intOp(inst.a) == 0) // (0 | b) -> b
substitute(function, inst, inst.b);
else if (inst.a.kind == IrOpKind::Constant && function.intOp(inst.a) == -1) // (-1 | b) -> -1
substitute(function, inst, build.constInt(-1));
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0) // (a | 0) -> a
substitute(function, inst, inst.a);
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == -1) // (a | -1) -> -1
substitute(function, inst, build.constInt(-1));
}
break;
case IrCmd::BITNOT_UINT:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constInt(~unsigned(function.intOp(inst.a))));
break;
case IrCmd::BITLSHIFT_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
unsigned op1 = unsigned(function.intOp(inst.a));
int op2 = function.intOp(inst.b);
if (unsigned(op2) < 32)
substitute(function, inst, build.constInt(op1 << op2));
}
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0)
{
substitute(function, inst, inst.a);
}
break;
case IrCmd::BITRSHIFT_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
unsigned op1 = unsigned(function.intOp(inst.a));
int op2 = function.intOp(inst.b);
if (unsigned(op2) < 32)
substitute(function, inst, build.constInt(op1 >> op2));
}
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0)
{
substitute(function, inst, inst.a);
}
break;
case IrCmd::BITARSHIFT_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
int op1 = function.intOp(inst.a);
int op2 = function.intOp(inst.b);
if (unsigned(op2) < 32)
{
// note: right shift of negative values was implementation-defined before C++20, where it became well-defined, and in
// practice all compilers do the right (shift) thing.
substitute(function, inst, build.constInt(op1 >> op2));
}
}
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0)
{
substitute(function, inst, inst.a);
}
break;
case IrCmd::BITLROTATE_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constInt(lrotate(unsigned(function.intOp(inst.a)), function.intOp(inst.b))));
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0)
substitute(function, inst, inst.a);
break;
case IrCmd::BITRROTATE_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
substitute(function, inst, build.constInt(rrotate(unsigned(function.intOp(inst.a)), function.intOp(inst.b))));
else if (inst.b.kind == IrOpKind::Constant && function.intOp(inst.b) == 0)
substitute(function, inst, inst.a);
break;
case IrCmd::BITCOUNTLZ_UINT:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constInt(countlz(unsigned(function.intOp(inst.a)))));
break;
case IrCmd::BITCOUNTRZ_UINT:
if (inst.a.kind == IrOpKind::Constant)
substitute(function, inst, build.constInt(countrz(unsigned(function.intOp(inst.a)))));
break;
default:
break;
}
}
uint32_t getNativeContextOffset(int bfid)
{
switch (bfid)
{
case LBF_MATH_ACOS:
return offsetof(NativeContext, libm_acos);
case LBF_MATH_ASIN:
return offsetof(NativeContext, libm_asin);
case LBF_MATH_ATAN2:
return offsetof(NativeContext, libm_atan2);
case LBF_MATH_ATAN:
return offsetof(NativeContext, libm_atan);
case LBF_MATH_COSH:
return offsetof(NativeContext, libm_cosh);
case LBF_MATH_COS:
return offsetof(NativeContext, libm_cos);
case LBF_MATH_EXP:
return offsetof(NativeContext, libm_exp);
case LBF_MATH_LOG10:
return offsetof(NativeContext, libm_log10);
case LBF_MATH_LOG:
return offsetof(NativeContext, libm_log);
case LBF_MATH_SINH:
return offsetof(NativeContext, libm_sinh);
case LBF_MATH_SIN:
return offsetof(NativeContext, libm_sin);
case LBF_MATH_TANH:
return offsetof(NativeContext, libm_tanh);
case LBF_MATH_TAN:
return offsetof(NativeContext, libm_tan);
case LBF_MATH_FMOD:
return offsetof(NativeContext, libm_fmod);
case LBF_MATH_POW:
return offsetof(NativeContext, libm_pow);
case LBF_IR_MATH_LOG2:
return offsetof(NativeContext, libm_log2);
case LBF_MATH_LDEXP:
return offsetof(NativeContext, libm_ldexp);
default:
LUAU_ASSERT(!"Unsupported bfid");
}
return 0;
}
} // namespace CodeGen
} // namespace Luau
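A sketch of how an optimization pass might drive the helpers above over one block; the driver loop itself is an assumption, not part of this file:

for (uint32_t index = block.start; index <= block.finish; index++)
{
    IrInst& inst = function.instructions[index];
    applySubstitutions(function, inst);           // redirect operands that point at SUBSTITUTE results
    foldConstants(build, function, block, index); // then try to fold the instruction itself
}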

View File

@ -1,222 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrValueLocationTracking.h"
namespace Luau
{
namespace CodeGen
{
IrValueLocationTracking::IrValueLocationTracking(IrFunction& function)
: function(function)
{
vmRegValue.fill(kInvalidInstIdx);
}
void IrValueLocationTracking::setRestoreCallack(void* context, void (*callback)(void* context, IrInst& inst))
{
restoreCallbackCtx = context;
restoreCallback = callback;
}
void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
{
switch (inst.cmd)
{
case IrCmd::STORE_TAG:
case IrCmd::STORE_POINTER:
case IrCmd::STORE_DOUBLE:
case IrCmd::STORE_INT:
case IrCmd::STORE_VECTOR:
case IrCmd::STORE_TVALUE:
invalidateRestoreOp(inst.a);
break;
case IrCmd::ADJUST_STACK_TO_REG:
invalidateRestoreVmRegs(vmRegOp(inst.a), -1);
break;
case IrCmd::FASTCALL:
invalidateRestoreVmRegs(vmRegOp(inst.b), function.intOp(inst.f));
break;
case IrCmd::INVOKE_FASTCALL:
// Multiple return sequences (count == -1) are defined by ADJUST_STACK_TO_REG
if (int count = function.intOp(inst.f); count != -1)
invalidateRestoreVmRegs(vmRegOp(inst.b), count);
break;
case IrCmd::DO_ARITH:
case IrCmd::DO_LEN:
case IrCmd::GET_TABLE:
case IrCmd::GET_IMPORT:
invalidateRestoreOp(inst.a);
break;
case IrCmd::CONCAT:
invalidateRestoreVmRegs(vmRegOp(inst.a), function.uintOp(inst.b));
break;
case IrCmd::GET_UPVALUE:
invalidateRestoreOp(inst.a);
break;
case IrCmd::PREPARE_FORN:
invalidateRestoreOp(inst.a);
invalidateRestoreOp(inst.b);
invalidateRestoreOp(inst.c);
break;
case IrCmd::CALL:
// Even if the result count is limited, all registers starting from the function register (ra) might be modified
invalidateRestoreVmRegs(vmRegOp(inst.a), -1);
break;
case IrCmd::FORGLOOP:
case IrCmd::FORGLOOP_FALLBACK:
// Even if the result count is limited, all registers starting from the iteration index (ra+2) might be modified
invalidateRestoreVmRegs(vmRegOp(inst.a) + 2, -1);
break;
case IrCmd::FALLBACK_GETGLOBAL:
case IrCmd::FALLBACK_GETTABLEKS:
invalidateRestoreOp(inst.b);
break;
case IrCmd::FALLBACK_NAMECALL:
invalidateRestoreVmRegs(vmRegOp(inst.b), 2);
break;
case IrCmd::FALLBACK_GETVARARGS:
invalidateRestoreVmRegs(vmRegOp(inst.b), function.intOp(inst.c));
break;
case IrCmd::FALLBACK_NEWCLOSURE:
case IrCmd::FALLBACK_DUPCLOSURE:
invalidateRestoreOp(inst.b);
break;
case IrCmd::FALLBACK_FORGPREP:
invalidateRestoreVmRegs(vmRegOp(inst.b), 3);
break;
// Make sure all VmReg referencing instructions are handled explicitly (only register reads here)
case IrCmd::LOAD_TAG:
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
case IrCmd::JUMP_IF_TRUTHY:
case IrCmd::JUMP_IF_FALSY:
case IrCmd::JUMP_CMP_ANY:
case IrCmd::SET_TABLE:
case IrCmd::SET_UPVALUE:
case IrCmd::INTERRUPT:
case IrCmd::BARRIER_OBJ:
case IrCmd::BARRIER_TABLE_FORWARD:
case IrCmd::CLOSE_UPVALS:
case IrCmd::CAPTURE:
case IrCmd::SETLIST:
case IrCmd::RETURN:
case IrCmd::FORGPREP_XNEXT_FALLBACK:
case IrCmd::FALLBACK_SETGLOBAL:
case IrCmd::FALLBACK_SETTABLEKS:
case IrCmd::FALLBACK_PREPVARARGS:
case IrCmd::ADJUST_STACK_TO_TOP:
break;
// These instructions read VmReg only after optimizeMemoryOperandsX64
case IrCmd::CHECK_TAG:
case IrCmd::ADD_NUM:
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:
case IrCmd::JUMP_EQ_TAG:
case IrCmd::JUMP_CMP_NUM:
break;
default:
// All instructions which reference registers have to be handled explicitly
LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg);
break;
}
}
void IrValueLocationTracking::afterInstLowering(IrInst& inst, uint32_t instIdx)
{
switch (inst.cmd)
{
case IrCmd::LOAD_TAG:
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
recordRestoreOp(instIdx, inst.a);
break;
case IrCmd::STORE_POINTER:
case IrCmd::STORE_DOUBLE:
case IrCmd::STORE_INT:
case IrCmd::STORE_TVALUE:
// If this is not the last use of the stored value, we can restore it from this new location
if (inst.b.kind == IrOpKind::Inst && function.instOp(inst.b).lastUse != instIdx)
recordRestoreOp(inst.b.index, inst.a);
break;
default:
break;
}
}
void IrValueLocationTracking::recordRestoreOp(uint32_t instIdx, IrOp location)
{
if (location.kind == IrOpKind::VmReg)
{
int reg = vmRegOp(location);
if (reg > maxReg)
maxReg = reg;
// Record location in register memory only if register is not captured
if (!function.cfg.captured.regs.test(reg))
function.recordRestoreOp(instIdx, location);
vmRegValue[reg] = instIdx;
}
else if (location.kind == IrOpKind::VmConst)
{
function.recordRestoreOp(instIdx, location);
}
}
void IrValueLocationTracking::invalidateRestoreOp(IrOp location)
{
if (location.kind == IrOpKind::VmReg)
{
uint32_t& instIdx = vmRegValue[vmRegOp(location)];
if (instIdx != kInvalidInstIdx)
{
IrInst& inst = function.instructions[instIdx];
// If instruction value is spilled and memory location is about to be lost, it has to be restored immediately
if (inst.needsReload)
restoreCallback(restoreCallbackCtx, inst);
// Instruction loses its memory storage location
function.recordRestoreOp(instIdx, IrOp());
// Register loses link with instruction
instIdx = kInvalidInstIdx;
}
}
else if (location.kind == IrOpKind::VmConst)
{
LUAU_ASSERT(!"VM constants are immutable");
}
}
void IrValueLocationTracking::invalidateRestoreVmRegs(int start, int count)
{
int end = count == -1 ? 255 : start + count;
if (end > maxReg)
end = maxReg;
for (int reg = start; reg <= end; reg++)
invalidateRestoreOp(IrOp{IrOpKind::VmReg, uint8_t(reg)});
}
} // namespace CodeGen
} // namespace Luau

View File

@ -1,38 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/IrData.h"
#include <array>
namespace Luau
{
namespace CodeGen
{
struct IrValueLocationTracking
{
IrValueLocationTracking(IrFunction& function);
void setRestoreCallack(void* context, void (*callback)(void* context, IrInst& inst));
void beforeInstLowering(IrInst& inst);
void afterInstLowering(IrInst& inst, uint32_t instIdx);
void recordRestoreOp(uint32_t instIdx, IrOp location);
void invalidateRestoreOp(IrOp location);
void invalidateRestoreVmRegs(int start, int count);
IrFunction& function;
std::array<uint32_t, 256> vmRegValue;
// For range/full invalidations, we only want to visit the limited range of registers that we have actually recorded
int maxReg = 0;
void* restoreCallbackCtx = nullptr;
void (*restoreCallback)(void* context, IrInst& inst) = nullptr;
};
} // namespace CodeGen
} // namespace Luau

View File

@ -1,109 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "NativeState.h"
#include "Luau/UnwindBuilder.h"
#include "CodeGenUtils.h"
#include "lbuiltins.h"
#include "lgc.h"
#include "ltable.h"
#include "lfunc.h"
#include "lvm.h"
#include <math.h>
#include <string.h>
namespace Luau
{
namespace CodeGen
{
constexpr unsigned kBlockSize = 4 * 1024 * 1024;
constexpr unsigned kMaxTotalSize = 256 * 1024 * 1024;
NativeState::NativeState()
: codeAllocator(kBlockSize, kMaxTotalSize)
{
}
NativeState::~NativeState() = default;
void initFunctions(NativeState& data)
{
static_assert(sizeof(data.context.luauF_table) == sizeof(luauF_table), "fastcall tables are not of the same length");
memcpy(data.context.luauF_table, luauF_table, sizeof(luauF_table));
data.context.luaV_lessthan = luaV_lessthan;
data.context.luaV_lessequal = luaV_lessequal;
data.context.luaV_equalval = luaV_equalval;
data.context.luaV_doarith = luaV_doarith;
data.context.luaV_dolen = luaV_dolen;
data.context.luaV_prepareFORN = luaV_prepareFORN;
data.context.luaV_gettable = luaV_gettable;
data.context.luaV_settable = luaV_settable;
data.context.luaV_getimport = luaV_getimport;
data.context.luaV_concat = luaV_concat;
data.context.luaH_getn = luaH_getn;
data.context.luaH_new = luaH_new;
data.context.luaH_clone = luaH_clone;
data.context.luaH_resizearray = luaH_resizearray;
data.context.luaC_barriertable = luaC_barriertable;
data.context.luaC_barrierf = luaC_barrierf;
data.context.luaC_barrierback = luaC_barrierback;
data.context.luaC_step = luaC_step;
data.context.luaF_close = luaF_close;
data.context.luaT_gettm = luaT_gettm;
data.context.luaT_objtypenamestr = luaT_objtypenamestr;
data.context.libm_exp = exp;
data.context.libm_pow = pow;
data.context.libm_fmod = fmod;
data.context.libm_log = log;
data.context.libm_log2 = log2;
data.context.libm_log10 = log10;
data.context.libm_ldexp = ldexp;
data.context.libm_round = round;
data.context.libm_frexp = frexp;
data.context.libm_modf = modf;
data.context.libm_asin = asin;
data.context.libm_sin = sin;
data.context.libm_sinh = sinh;
data.context.libm_acos = acos;
data.context.libm_cos = cos;
data.context.libm_cosh = cosh;
data.context.libm_atan = atan;
data.context.libm_atan2 = atan2;
data.context.libm_tan = tan;
data.context.libm_tanh = tanh;
data.context.forgLoopTableIter = forgLoopTableIter;
data.context.forgLoopNodeIter = forgLoopNodeIter;
data.context.forgLoopNonTableFallback = forgLoopNonTableFallback;
data.context.forgPrepXnextFallback = forgPrepXnextFallback;
data.context.callProlog = callProlog;
data.context.callEpilogC = callEpilogC;
data.context.callFallback = callFallback;
data.context.executeGETGLOBAL = executeGETGLOBAL;
data.context.executeSETGLOBAL = executeSETGLOBAL;
data.context.executeGETTABLEKS = executeGETTABLEKS;
data.context.executeSETTABLEKS = executeSETTABLEKS;
data.context.executeNEWCLOSURE = executeNEWCLOSURE;
data.context.executeNAMECALL = executeNAMECALL;
data.context.executeFORGPREP = executeFORGPREP;
data.context.executeGETVARARGS = executeGETVARARGS;
data.context.executeDUPCLOSURE = executeDUPCLOSURE;
data.context.executePREPVARARGS = executePREPVARARGS;
data.context.executeSETLIST = executeSETLIST;
}
} // namespace CodeGen
} // namespace Luau

View File

@ -1,126 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
#include "Luau/CodeAllocator.h"
#include "Luau/Label.h"
#include <memory>
#include <stdint.h>
#include "ldebug.h"
#include "lobject.h"
#include "ltm.h"
#include "lstate.h"
typedef int (*luau_FastFunction)(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams);
namespace Luau
{
namespace CodeGen
{
class UnwindBuilder;
struct NativeContext
{
// Gateway (C => native transition) entry & exit, compiled at runtime
uint8_t* gateEntry = nullptr;
uint8_t* gateExit = nullptr;
// Helper functions, implemented in C
int (*luaV_lessthan)(lua_State* L, const TValue* l, const TValue* r) = nullptr;
int (*luaV_lessequal)(lua_State* L, const TValue* l, const TValue* r) = nullptr;
int (*luaV_equalval)(lua_State* L, const TValue* t1, const TValue* t2) = nullptr;
void (*luaV_doarith)(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TMS op) = nullptr;
void (*luaV_dolen)(lua_State* L, StkId ra, const TValue* rb) = nullptr;
void (*luaV_prepareFORN)(lua_State* L, StkId plimit, StkId pstep, StkId pinit) = nullptr;
void (*luaV_gettable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
void (*luaV_settable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
void (*luaV_getimport)(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil) = nullptr;
void (*luaV_concat)(lua_State* L, int total, int last) = nullptr;
int (*luaH_getn)(Table* t) = nullptr;
Table* (*luaH_new)(lua_State* L, int narray, int lnhash) = nullptr;
Table* (*luaH_clone)(lua_State* L, Table* tt) = nullptr;
void (*luaH_resizearray)(lua_State* L, Table* t, int nasize) = nullptr;
void (*luaC_barriertable)(lua_State* L, Table* t, GCObject* v) = nullptr;
void (*luaC_barrierf)(lua_State* L, GCObject* o, GCObject* v) = nullptr;
void (*luaC_barrierback)(lua_State* L, GCObject* o, GCObject** gclist) = nullptr;
size_t (*luaC_step)(lua_State* L, bool assist) = nullptr;
void (*luaF_close)(lua_State* L, StkId level) = nullptr;
const TValue* (*luaT_gettm)(Table* events, TMS event, TString* ename) = nullptr;
const TString* (*luaT_objtypenamestr)(lua_State* L, const TValue* o) = nullptr;
double (*libm_exp)(double) = nullptr;
double (*libm_pow)(double, double) = nullptr;
double (*libm_fmod)(double, double) = nullptr;
double (*libm_asin)(double) = nullptr;
double (*libm_sin)(double) = nullptr;
double (*libm_sinh)(double) = nullptr;
double (*libm_acos)(double) = nullptr;
double (*libm_cos)(double) = nullptr;
double (*libm_cosh)(double) = nullptr;
double (*libm_atan)(double) = nullptr;
double (*libm_atan2)(double, double) = nullptr;
double (*libm_tan)(double) = nullptr;
double (*libm_tanh)(double) = nullptr;
double (*libm_log)(double) = nullptr;
double (*libm_log2)(double) = nullptr;
double (*libm_log10)(double) = nullptr;
double (*libm_ldexp)(double, int) = nullptr;
double (*libm_round)(double) = nullptr;
double (*libm_frexp)(double, int*) = nullptr;
double (*libm_modf)(double, double*) = nullptr;
// Helper functions
bool (*forgLoopTableIter)(lua_State* L, Table* h, int index, TValue* ra) = nullptr;
bool (*forgLoopNodeIter)(lua_State* L, Table* h, int index, TValue* ra) = nullptr;
bool (*forgLoopNonTableFallback)(lua_State* L, int insnA, int aux) = nullptr;
void (*forgPrepXnextFallback)(lua_State* L, TValue* ra, int pc) = nullptr;
Closure* (*callProlog)(lua_State* L, TValue* ra, StkId argtop, int nresults) = nullptr;
void (*callEpilogC)(lua_State* L, int nresults, int n) = nullptr;
Closure* (*callFallback)(lua_State* L, StkId ra, StkId argtop, int nresults) = nullptr;
// Opcode fallbacks, implemented in C
const Instruction* (*executeGETGLOBAL)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeSETGLOBAL)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeGETTABLEKS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeSETTABLEKS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeNEWCLOSURE)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeNAMECALL)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeSETLIST)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeFORGPREP)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeGETVARARGS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executeDUPCLOSURE)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
const Instruction* (*executePREPVARARGS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
// Fast call methods, implemented in C
luau_FastFunction luauF_table[256] = {};
};
using GateFn = int (*)(lua_State*, Proto*, uintptr_t, NativeContext*);
struct NativeState
{
NativeState();
~NativeState();
CodeAllocator codeAllocator;
std::unique_ptr<UnwindBuilder> unwindBuilder;
uint8_t* gateData = nullptr;
size_t gateDataSize = 0;
NativeContext context;
};
void initFunctions(NativeState& data);
} // namespace CodeGen
} // namespace Luau

File diff suppressed because it is too large Load Diff

View File

@ -1,109 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/OptimizeFinalX64.h"
#include "Luau/IrUtils.h"
#include <utility>
namespace Luau
{
namespace CodeGen
{
// x64 assembly allows memory operands, but IR separates loads from uses
// To improve final x64 lowering, we try to 'inline' single-use register/constant loads into some of our instructions
// This pass might not be useful on different architectures
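// A sketch of the idea on hypothetical IR (illustrative names, not actual dumps):
//   %1 = LOAD_DOUBLE R2       ; %1 has a single use
//   %2 = ADD_NUM %0, %1
// becomes
//   %2 = ADD_NUM %0, R2       ; lowering can read the operand straight from memory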
static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block)
{
LUAU_ASSERT(block.kind != IrBlockKind::Dead);
for (uint32_t index = block.start; index <= block.finish; index++)
{
LUAU_ASSERT(index < function.instructions.size());
IrInst& inst = function.instructions[index];
switch (inst.cmd)
{
case IrCmd::CHECK_TAG:
{
if (inst.a.kind == IrOpKind::Inst)
{
IrInst& tag = function.instOp(inst.a);
if (tag.useCount == 1 && tag.cmd == IrCmd::LOAD_TAG && (tag.a.kind == IrOpKind::VmReg || tag.a.kind == IrOpKind::VmConst))
replace(function, inst.a, tag.a);
}
break;
}
case IrCmd::ADD_NUM:
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:
{
if (inst.b.kind == IrOpKind::Inst)
{
IrInst& rhs = function.instOp(inst.b);
if (rhs.useCount == 1 && rhs.cmd == IrCmd::LOAD_DOUBLE && (rhs.a.kind == IrOpKind::VmReg || rhs.a.kind == IrOpKind::VmConst))
replace(function, inst.b, rhs.a);
}
break;
}
case IrCmd::JUMP_EQ_TAG:
{
if (inst.a.kind == IrOpKind::Inst)
{
IrInst& tagA = function.instOp(inst.a);
if (tagA.useCount == 1 && tagA.cmd == IrCmd::LOAD_TAG && (tagA.a.kind == IrOpKind::VmReg || tagA.a.kind == IrOpKind::VmConst))
{
replace(function, inst.a, tagA.a);
break;
}
}
if (inst.b.kind == IrOpKind::Inst)
{
IrInst& tagB = function.instOp(inst.b);
if (tagB.useCount == 1 && tagB.cmd == IrCmd::LOAD_TAG && (tagB.a.kind == IrOpKind::VmReg || tagB.a.kind == IrOpKind::VmConst))
{
std::swap(inst.a, inst.b);
replace(function, inst.a, tagB.a);
}
}
break;
}
case IrCmd::JUMP_CMP_NUM:
{
if (inst.a.kind == IrOpKind::Inst)
{
IrInst& num = function.instOp(inst.a);
if (num.useCount == 1 && num.cmd == IrCmd::LOAD_DOUBLE)
replace(function, inst.a, num.a);
}
break;
}
default:
break;
}
}
}
void optimizeMemoryOperandsX64(IrFunction& function)
{
for (IrBlock& block : function.blocks)
{
if (block.kind == IrBlockKind::Dead)
continue;
optimizeMemoryOperandsX64(function, block);
}
}
} // namespace CodeGen
} // namespace Luau

View File

@ -1,299 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/UnwindBuilderDwarf2.h"
#include "ByteUtils.h"
#include <string.h>
// General information about Dwarf2 format can be found at:
// https://dwarfstd.org/doc/dwarf-2.0.0.pdf [DWARF Debugging Information Format]
// The main part relevant to async exception unwinding is section '6.4 Call Frame Information'
// Information about System V ABI (AMD64) can be found at:
// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf [System V Application Binary Interface (AMD64 Architecture Processor Supplement)]
// Interaction between Dwarf2 and System V ABI can be found in sections '3.6.2 DWARF Register Number Mapping' and '4.2.4 EH_FRAME sections'
// Call frame instruction opcodes (Dwarf2, page 78, ch. 7.23 figure 37)
#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset 0x80
#define DW_CFA_restore 0xc0
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
#define DW_CFA_advance_loc4 0x04
#define DW_CFA_offset_extended 0x05
#define DW_CFA_restore_extended 0x06
#define DW_CFA_undefined 0x07
#define DW_CFA_same_value 0x08
#define DW_CFA_register 0x09
#define DW_CFA_remember_state 0x0a
#define DW_CFA_restore_state 0x0b
#define DW_CFA_def_cfa 0x0c
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_nop 0x00
#define DW_CFA_lo_user 0x1c
#define DW_CFA_hi_user 0x3f
// Register numbers for X64 (System V ABI, page 57, ch. 3.7, figure 3.36)
#define DW_REG_X64_RAX 0
#define DW_REG_X64_RDX 1
#define DW_REG_X64_RCX 2
#define DW_REG_X64_RBX 3
#define DW_REG_X64_RSI 4
#define DW_REG_X64_RDI 5
#define DW_REG_X64_RBP 6
#define DW_REG_X64_RSP 7
#define DW_REG_X64_RA 16
// Register numbers for A64 (DWARF for the Arm 64-bit Architecture, ch. 4.1)
#define DW_REG_A64_FP 29
#define DW_REG_A64_LR 30
#define DW_REG_A64_SP 31
// X64 register mapping from real register index to DWARF2 (r8..r15 are mapped 1-1, but named registers aren't)
const int regIndexToDwRegX64[16] = {DW_REG_X64_RAX, DW_REG_X64_RCX, DW_REG_X64_RDX, DW_REG_X64_RBX, DW_REG_X64_RSP, DW_REG_X64_RBP, DW_REG_X64_RSI,
DW_REG_X64_RDI, 8, 9, 10, 11, 12, 13, 14, 15};
const int kCodeAlignFactor = 1;
const int kDataAlignFactor = 8;
const int kDwarfAlign = 8;
const int kFdeInitialLocationOffset = 8;
const int kFdeAddressRangeOffset = 16;
// Define canonical frame address expression as [reg + offset]
static uint8_t* defineCfaExpression(uint8_t* pos, int dwReg, uint32_t stackOffset)
{
pos = writeu8(pos, DW_CFA_def_cfa);
pos = writeuleb128(pos, dwReg);
pos = writeuleb128(pos, stackOffset);
return pos;
}
// Update offset value in canonical frame address expression
static uint8_t* defineCfaExpressionOffset(uint8_t* pos, uint32_t stackOffset)
{
pos = writeu8(pos, DW_CFA_def_cfa_offset);
pos = writeuleb128(pos, stackOffset);
return pos;
}
static uint8_t* defineSavedRegisterLocation(uint8_t* pos, int dwReg, uint32_t stackOffset)
{
LUAU_ASSERT(stackOffset % kDataAlignFactor == 0 && "stack offsets have to be measured in kDataAlignFactor units");
if (dwReg <= 0x3f)
{
pos = writeu8(pos, DW_CFA_offset + dwReg);
}
else
{
pos = writeu8(pos, DW_CFA_offset_extended);
pos = writeuleb128(pos, dwReg);
}
pos = writeuleb128(pos, stackOffset / kDataAlignFactor);
return pos;
}
static uint8_t* advanceLocation(uint8_t* pos, unsigned int offset)
{
LUAU_ASSERT(offset < 256);
pos = writeu8(pos, DW_CFA_advance_loc1);
pos = writeu8(pos, offset);
return pos;
}
static uint8_t* alignPosition(uint8_t* start, uint8_t* pos)
{
size_t size = pos - start;
size_t pad = ((size + kDwarfAlign - 1) & ~(kDwarfAlign - 1)) - size;
for (size_t i = 0; i < pad; i++)
pos = writeu8(pos, DW_CFA_nop);
return pos;
}
namespace Luau
{
namespace CodeGen
{
void UnwindBuilderDwarf2::setBeginOffset(size_t beginOffset)
{
this->beginOffset = beginOffset;
}
size_t UnwindBuilderDwarf2::getBeginOffset() const
{
return beginOffset;
}
void UnwindBuilderDwarf2::startInfo(Arch arch)
{
LUAU_ASSERT(arch == A64 || arch == X64);
uint8_t* cieLength = pos;
pos = writeu32(pos, 0); // Length (to be filled later)
pos = writeu32(pos, 0); // CIE id. 0 -- .eh_frame
pos = writeu8(pos, 1); // Version
pos = writeu8(pos, 0); // CIE augmentation String ""
int ra = arch == A64 ? DW_REG_A64_LR : DW_REG_X64_RA;
pos = writeuleb128(pos, kCodeAlignFactor); // Code align factor
pos = writeuleb128(pos, -kDataAlignFactor & 0x7f); // Data align factor (as signed LEB128)
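// A single-byte signed LEB128 for a small negative value is just the value masked to 7 bits, so -8 encodes as 0x78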
pos = writeu8(pos, ra); // Return address register
// Optional CIE augmentation section (not present)
// Call frame instructions (common for all FDEs)
if (arch == A64)
{
pos = defineCfaExpression(pos, DW_REG_A64_SP, 0); // Define CFA to be the sp
}
else
{
pos = defineCfaExpression(pos, DW_REG_X64_RSP, 8); // Define CFA to be the rsp + 8
pos = defineSavedRegisterLocation(pos, DW_REG_X64_RA, 8); // Define return address register (RA) to be located at CFA - 8
}
pos = alignPosition(cieLength, pos);
writeu32(cieLength, unsigned(pos - cieLength - 4)); // Length field itself is excluded from length
}
void UnwindBuilderDwarf2::startFunction()
{
// End offset is filled in later and everything gets adjusted at the end
UnwindFunctionDwarf2 func;
func.beginOffset = 0;
func.endOffset = 0;
func.fdeEntryStartPos = uint32_t(pos - rawData);
unwindFunctions.push_back(func);
fdeEntryStart = pos; // Will be written at the end
pos = writeu32(pos, 0); // Length (to be filled later)
pos = writeu32(pos, unsigned(pos - rawData)); // CIE pointer
pos = writeu64(pos, 0); // Initial location (to be filled later)
pos = writeu64(pos, 0); // Address range (to be filled later)
// Optional CIE augmentation section (not present)
// Function call frame instructions to follow
}
void UnwindBuilderDwarf2::finishFunction(uint32_t beginOffset, uint32_t endOffset)
{
unwindFunctions.back().beginOffset = beginOffset;
unwindFunctions.back().endOffset = endOffset;
LUAU_ASSERT(fdeEntryStart != nullptr);
pos = alignPosition(fdeEntryStart, pos);
writeu32(fdeEntryStart, unsigned(pos - fdeEntryStart - 4)); // Length field itself is excluded from length
}
void UnwindBuilderDwarf2::finishInfo()
{
// Terminate section
pos = writeu32(pos, 0);
LUAU_ASSERT(getSize() <= kRawDataLimit);
}
void UnwindBuilderDwarf2::prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs)
{
LUAU_ASSERT(stackSize % 16 == 0);
LUAU_ASSERT(regs.size() >= 2 && regs.begin()[0] == A64::x29 && regs.begin()[1] == A64::x30);
LUAU_ASSERT(regs.size() * 8 <= stackSize);
// sub sp, sp, stackSize
pos = advanceLocation(pos, 4);
pos = defineCfaExpressionOffset(pos, stackSize);
// stp/str to store each register to stack in order
pos = advanceLocation(pos, prologueSize - 4);
for (size_t i = 0; i < regs.size(); ++i)
{
LUAU_ASSERT(regs.begin()[i].kind == A64::KindA64::x);
pos = defineSavedRegisterLocation(pos, regs.begin()[i].index, stackSize - unsigned(i * 8));
}
}
void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> regs)
{
LUAU_ASSERT(stackSize > 0 && stackSize <= 128 && stackSize % 8 == 0);
unsigned int stackOffset = 8; // Return address was pushed by calling the function
unsigned int prologueOffset = 0;
if (setupFrame)
{
// push rbp
stackOffset += 8;
prologueOffset += 2;
pos = advanceLocation(pos, 2);
pos = defineCfaExpressionOffset(pos, stackOffset);
pos = defineSavedRegisterLocation(pos, DW_REG_X64_RBP, stackOffset);
// mov rbp, rsp
prologueOffset += 3;
pos = advanceLocation(pos, 3);
}
// push reg
for (X64::RegisterX64 reg : regs)
{
LUAU_ASSERT(reg.size == X64::SizeX64::qword);
stackOffset += 8;
prologueOffset += 2;
pos = advanceLocation(pos, 2);
pos = defineCfaExpressionOffset(pos, stackOffset);
pos = defineSavedRegisterLocation(pos, regIndexToDwRegX64[reg.index], stackOffset);
}
// sub rsp, stackSize
stackOffset += stackSize;
prologueOffset += 4;
pos = advanceLocation(pos, 4);
pos = defineCfaExpressionOffset(pos, stackOffset);
LUAU_ASSERT(stackOffset % 16 == 0);
LUAU_ASSERT(prologueOffset == prologueSize);
}
size_t UnwindBuilderDwarf2::getSize() const
{
return size_t(pos - rawData);
}
size_t UnwindBuilderDwarf2::getFunctionCount() const
{
return unwindFunctions.size();
}
void UnwindBuilderDwarf2::finalize(char* target, size_t offset, void* funcAddress, size_t funcSize) const
{
memcpy(target, rawData, getSize());
for (const UnwindFunctionDwarf2& func : unwindFunctions)
{
uint8_t* fdeEntry = (uint8_t*)target + func.fdeEntryStartPos;
writeu64(fdeEntry + kFdeInitialLocationOffset, uintptr_t(funcAddress) + offset + func.beginOffset);
if (func.endOffset == kFullBlockFuncton)
writeu64(fdeEntry + kFdeAddressRangeOffset, funcSize - offset);
else
writeu64(fdeEntry + kFdeAddressRangeOffset, func.endOffset - func.beginOffset);
}
}
} // namespace CodeGen
} // namespace Luau

View File

@ -1,190 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/UnwindBuilderWin.h"
#include <string.h>
// Information about the Windows x64 unwinding data setup can be found at:
// https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64 [x64 exception handling]
#define UWOP_PUSH_NONVOL 0
#define UWOP_ALLOC_LARGE 1
#define UWOP_ALLOC_SMALL 2
#define UWOP_SET_FPREG 3
#define UWOP_SAVE_NONVOL 4
#define UWOP_SAVE_NONVOL_FAR 5
#define UWOP_SAVE_XMM128 8
#define UWOP_SAVE_XMM128_FAR 9
#define UWOP_PUSH_MACHFRAME 10
namespace Luau
{
namespace CodeGen
{
void UnwindBuilderWin::setBeginOffset(size_t beginOffset)
{
this->beginOffset = beginOffset;
}
size_t UnwindBuilderWin::getBeginOffset() const
{
return beginOffset;
}
void UnwindBuilderWin::startInfo(Arch arch)
{
LUAU_ASSERT(arch == X64);
}
void UnwindBuilderWin::startFunction()
{
// End offset is filled in later and everything gets adjusted at the end
UnwindFunctionWin func;
func.beginOffset = 0;
func.endOffset = 0;
func.unwindInfoOffset = uint32_t(rawDataPos - rawData);
unwindFunctions.push_back(func);
unwindCodes.clear();
unwindCodes.reserve(16);
prologSize = 0;
// rax has register index 0, which in Windows unwind info means that the frame register is not used
frameReg = X64::rax;
frameRegOffset = 0;
}
void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset)
{
unwindFunctions.back().beginOffset = beginOffset;
unwindFunctions.back().endOffset = endOffset;
// Windows unwind code count is stored in uint8_t, so we can't have more
LUAU_ASSERT(unwindCodes.size() < 256);
UnwindInfoWin info;
info.version = 1;
info.flags = 0; // No EH
info.prologsize = prologSize;
info.unwindcodecount = uint8_t(unwindCodes.size());
LUAU_ASSERT(frameReg.index < 16);
info.framereg = frameReg.index;
LUAU_ASSERT(frameRegOffset < 16);
info.frameregoff = frameRegOffset;
LUAU_ASSERT(rawDataPos + sizeof(info) <= rawData + kRawDataLimit);
memcpy(rawDataPos, &info, sizeof(info));
rawDataPos += sizeof(info);
if (!unwindCodes.empty())
{
// Copy unwind codes in reverse order
// Some unwind codes take up two array slots, but we don't use those atm
uint8_t* unwindCodePos = rawDataPos + sizeof(UnwindCodeWin) * (unwindCodes.size() - 1);
LUAU_ASSERT(unwindCodePos <= rawData + kRawDataLimit);
for (size_t i = 0; i < unwindCodes.size(); i++)
{
memcpy(unwindCodePos, &unwindCodes[i], sizeof(UnwindCodeWin));
unwindCodePos -= sizeof(UnwindCodeWin);
}
}
rawDataPos += sizeof(UnwindCodeWin) * unwindCodes.size();
// Size has to be even, but unwind code count doesn't have to
if (unwindCodes.size() % 2 != 0)
rawDataPos += sizeof(UnwindCodeWin);
LUAU_ASSERT(rawDataPos <= rawData + kRawDataLimit);
}
void UnwindBuilderWin::finishInfo() {}
void UnwindBuilderWin::prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs)
{
LUAU_ASSERT(!"Not implemented");
}
void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> regs)
{
LUAU_ASSERT(stackSize > 0 && stackSize <= 128 && stackSize % 8 == 0);
LUAU_ASSERT(prologueSize < 256);
unsigned int stackOffset = 8; // Return address was pushed by calling the function
unsigned int prologueOffset = 0;
if (setupFrame)
{
// push rbp
stackOffset += 8;
prologueOffset += 2;
unwindCodes.push_back({uint8_t(prologueOffset), UWOP_PUSH_NONVOL, X64::rbp.index});
// mov rbp, rsp
prologueOffset += 3;
frameReg = X64::rbp;
frameRegOffset = 0;
unwindCodes.push_back({uint8_t(prologueOffset), UWOP_SET_FPREG, frameRegOffset});
}
// push reg
for (X64::RegisterX64 reg : regs)
{
LUAU_ASSERT(reg.size == X64::SizeX64::qword);
stackOffset += 8;
prologueOffset += 2;
unwindCodes.push_back({uint8_t(prologueOffset), UWOP_PUSH_NONVOL, reg.index});
}
// sub rsp, stackSize
stackOffset += stackSize;
prologueOffset += 4;
unwindCodes.push_back({uint8_t(prologueOffset), UWOP_ALLOC_SMALL, uint8_t((stackSize - 8) / 8)});
LUAU_ASSERT(stackOffset % 16 == 0);
LUAU_ASSERT(prologueOffset == prologueSize);
this->prologSize = prologueSize;
}
size_t UnwindBuilderWin::getSize() const
{
return sizeof(UnwindFunctionWin) * unwindFunctions.size() + size_t(rawDataPos - rawData);
}
size_t UnwindBuilderWin::getFunctionCount() const
{
return unwindFunctions.size();
}
void UnwindBuilderWin::finalize(char* target, size_t offset, void* funcAddress, size_t funcSize) const
{
// Copy adjusted function information
for (UnwindFunctionWin func : unwindFunctions)
{
// Code will start after the unwind info
func.beginOffset += uint32_t(offset);
// Whole block is a part of a 'single function'
if (func.endOffset == kFullBlockFuncton)
func.endOffset = uint32_t(funcSize);
else
func.endOffset += uint32_t(offset);
// Unwind data is placed right after the RUNTIME_FUNCTION data
func.unwindInfoOffset += uint32_t(sizeof(UnwindFunctionWin) * unwindFunctions.size());
memcpy(target, &func, sizeof(func));
target += sizeof(func);
}
// Copy unwind codes
memcpy(target, rawData, size_t(rawDataPos - rawData));
}
} // namespace CodeGen
} // namespace Luau

View File

@ -1,21 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "luacodegen.h"
#include "Luau/CodeGen.h"
#include "lapi.h"
int luau_codegen_supported()
{
return Luau::CodeGen::isSupported();
}
void luau_codegen_create(lua_State* L)
{
Luau::CodeGen::create(L);
}
void luau_codegen_compile(lua_State* L, int idx)
{
Luau::CodeGen::compile(L, idx);
}
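// An illustrative embedding sequence (a sketch; everything except the three functions above is assumed context):
//   if (luau_codegen_supported())
//       luau_codegen_create(L);
//   // ...load bytecode so that the target closure sits at stack index idx...
//   luau_codegen_compile(L, idx);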

View File

@ -1,554 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
// clang-format off
// This header contains the bytecode definition for Luau interpreter
// Creating the bytecode is outside the scope of this file and is handled by bytecode builder (BytecodeBuilder.h) and bytecode compiler (Compiler.h)
// Note that ALL enums declared in this file are order-sensitive since the values are baked into bytecode that needs to be processed by legacy clients.
// # Bytecode definitions
// Bytecode instructions use "word code": each instruction is one or more 32-bit words.
// The first word in the instruction is always the instruction header, and *must* contain the opcode (enum below) in the least significant byte.
//
// Instruction word can be encoded using one of the following encodings:
// ABC - least-significant byte for the opcode, followed by three bytes, A, B and C; each byte declares a register index, small index into some other table or an unsigned integral value
// AD - least-significant byte for the opcode, followed by A byte, followed by D half-word (16-bit integer). D is a signed integer that commonly specifies constant table index or jump offset
// E - least-significant byte for the opcode, followed by E (24-bit integer). E is a signed integer that commonly specifies a jump offset
//
// Instruction word is sometimes followed by one extra word, indicated as AUX - this is just a 32-bit word and is decoded according to the specification for each opcode.
// For each opcode the encoding is *static* - that is, based on the opcode you know a priori how large the instruction is, with the exception of NEWCLOSURE
// # Bytecode indices
// Bytecode instructions commonly refer to integer values that define offsets or indices for various entities. For each type, there's a maximum encodable value.
// Note that in some cases, the compiler will set a lower limit than the maximum encodable value to prevent fragile code from bumping against the limits whenever we change the compilation details.
// Additionally, in some specific instructions such as ANDK, the limit on the encoded value is smaller; this means that if a value is larger, a different instruction must be selected.
//
// Registers: 0-254. Registers refer to the values on the function's stack frame, including arguments.
// Upvalues: 0-199. Upvalues refer to the values stored in the closure object.
// Constants: 0-2^23-1. Constants are stored in a table allocated with each proto; to allow for future bytecode tweaks the encodable value is limited to 23 bits.
// Closures: 0-2^15-1. Closures are created from child protos via a child index; the limit is for the number of closures immediately referenced in each function.
// Jumps: -2^23..2^23. Jump offsets are specified in word increments, so jumping over an instruction may sometimes require an offset of 2 or more. Note that for jump instructions with AUX, the AUX word is included as part of the jump offset.
// # Bytecode versions
// The bytecode serialized format embeds a version number that dictates both the serialized form and the allowed instructions. As long as the bytecode version falls into the supported
// range (indicated by LBC_BYTECODE_MIN / LBC_BYTECODE_MAX) and was produced by Luau compiler, it should load and execute correctly.
//
// Note that Luau runtime doesn't provide indefinite bytecode compatibility: support for older versions gets removed over time. As such, bytecode isn't a durable storage format and it's expected
// that Luau users can recompile bytecode from source on Luau version upgrades if necessary.
// # Bytecode version history
//
// Note: due to limitations of the versioning scheme, some bytecode blobs that carry version 2 use features from version 3. Starting from version 3, the version should be sufficient to indicate bytecode compatibility.
//
// Version 1: Baseline version for the open-source release. Supported until 0.521.
// Version 2: Adds Proto::linedefined. Supported until 0.544.
// Version 3: Adds FORGPREP/JUMPXEQK* and enhances AUX encoding for FORGLOOP. Removes FORGLOOP_NEXT/INEXT and JUMPIFEQK/JUMPIFNOTEQK. Currently supported.
// Bytecode opcode, part of the instruction header
enum LuauOpcode
{
// NOP: noop
LOP_NOP,
// BREAK: debugger break
LOP_BREAK,
// LOADNIL: sets register to nil
// A: target register
LOP_LOADNIL,
// LOADB: sets register to boolean and jumps to a given short offset (used to compile comparison results into a boolean)
// A: target register
// B: value (0/1)
// C: jump offset
LOP_LOADB,
// LOADN: sets register to a number literal
// A: target register
// D: value (-32768..32767)
LOP_LOADN,
// LOADK: sets register to an entry from the constant table from the proto (number/string)
// A: target register
// D: constant table index (0..32767)
LOP_LOADK,
// MOVE: move (copy) value from one register to another
// A: target register
// B: source register
LOP_MOVE,
// GETGLOBAL: load value from global table using constant string as a key
// A: target register
// C: predicted slot index (based on hash)
// AUX: constant table index
LOP_GETGLOBAL,
// SETGLOBAL: set value in global table using constant string as a key
// A: source register
// C: predicted slot index (based on hash)
// AUX: constant table index
LOP_SETGLOBAL,
// GETUPVAL: load upvalue from the upvalue table for the current function
// A: target register
// B: upvalue index
LOP_GETUPVAL,
// SETUPVAL: store value into the upvalue table for the current function
// A: target register
// B: upvalue index
LOP_SETUPVAL,
// CLOSEUPVALS: close (migrate to heap) all upvalues that were captured for registers >= target
// A: target register
LOP_CLOSEUPVALS,
// GETIMPORT: load an imported global from the constant table
// A: target register
// D: constant table index (0..32767); we assume that imports are loaded into the constant table
// AUX: 3 10-bit indices of constant strings that, combined, constitute an import path; length of the path is set by the top 2 bits (1,2,3)
LOP_GETIMPORT,
// GETTABLE: load value from table into target register using key from register
// A: target register
// B: table register
// C: index register
LOP_GETTABLE,
// SETTABLE: store source register into table using key from register
// A: source register
// B: table register
// C: index register
LOP_SETTABLE,
// GETTABLEKS: load value from table into target register using constant string as a key
// A: target register
// B: table register
// C: predicted slot index (based on hash)
// AUX: constant table index
LOP_GETTABLEKS,
// SETTABLEKS: store source register into table using constant string as a key
// A: source register
// B: table register
// C: predicted slot index (based on hash)
// AUX: constant table index
LOP_SETTABLEKS,
// GETTABLEN: load value from table into target register using small integer index as a key
// A: target register
// B: table register
// C: index-1 (index is 1..256)
LOP_GETTABLEN,
// SETTABLEN: store source register into table using small integer index as a key
// A: source register
// B: table register
// C: index-1 (index is 1..256)
LOP_SETTABLEN,
// NEWCLOSURE: create closure from a child proto; followed by a CAPTURE instruction for each upvalue
// A: target register
// D: child proto index (0..32767)
LOP_NEWCLOSURE,
// NAMECALL: prepare to call the specified method by name: load the function from the source register using the constant key into the target register, and copy the source register into target register + 1
// A: target register
// B: source register
// C: predicted slot index (based on hash)
// AUX: constant table index
// Note that this instruction must be followed directly by CALL; it prepares the arguments
// This instruction is roughly equivalent to GETTABLEKS + MOVE pair, but we need a special instruction to support custom __namecall metamethod
LOP_NAMECALL,
// CALL: call specified function
// A: register where the function object lives, followed by arguments; results are placed starting from the same register
// B: argument count + 1, or 0 to preserve all arguments up to top (MULTRET)
// C: result count + 1, or 0 to preserve all values and adjust top (MULTRET)
LOP_CALL,
// RETURN: returns specified values from the function
// A: register where the returned values start
// B: number of returned values + 1, or 0 to return all values up to top (MULTRET)
LOP_RETURN,
// JUMP: jumps to target offset
// D: jump offset (-32768..32767; 0 means "next instruction" aka "don't jump")
LOP_JUMP,
// JUMPBACK: jumps to target offset; this is equivalent to JUMP but is used as a safepoint to be able to interrupt while/repeat loops
// D: jump offset (-32768..32767; 0 means "next instruction" aka "don't jump")
LOP_JUMPBACK,
// JUMPIF: jumps to target offset if register is not nil/false
// A: source register
// D: jump offset (-32768..32767; 0 means "next instruction" aka "don't jump")
LOP_JUMPIF,
// JUMPIFNOT: jumps to target offset if register is nil/false
// A: source register
// D: jump offset (-32768..32767; 0 means "next instruction" aka "don't jump")
LOP_JUMPIFNOT,
// JUMPIFEQ, JUMPIFLE, JUMPIFLT, JUMPIFNOTEQ, JUMPIFNOTLE, JUMPIFNOTLT: jumps to target offset if the comparison is true (or false, for NOT variants)
// A: source register 1
// D: jump offset (-32768..32767; 1 means "next instruction" aka "don't jump")
// AUX: source register 2
LOP_JUMPIFEQ,
LOP_JUMPIFLE,
LOP_JUMPIFLT,
LOP_JUMPIFNOTEQ,
LOP_JUMPIFNOTLE,
LOP_JUMPIFNOTLT,
// ADD, SUB, MUL, DIV, MOD, POW: compute arithmetic operation between two source registers and put the result into target register
// A: target register
// B: source register 1
// C: source register 2
LOP_ADD,
LOP_SUB,
LOP_MUL,
LOP_DIV,
LOP_MOD,
LOP_POW,
// ADDK, SUBK, MULK, DIVK, MODK, POWK: compute arithmetic operation between the source register and a constant and put the result into target register
// A: target register
// B: source register
// C: constant table index (0..255)
LOP_ADDK,
LOP_SUBK,
LOP_MULK,
LOP_DIVK,
LOP_MODK,
LOP_POWK,
// AND, OR: perform `and` or `or` operation (selecting first or second register based on whether the first one is truthy) and put the result into target register
// A: target register
// B: source register 1
// C: source register 2
LOP_AND,
LOP_OR,
// ANDK, ORK: perform `and` or `or` operation (selecting source register or constant based on whether the source register is truthy) and put the result into target register
// A: target register
// B: source register
// C: constant table index (0..255)
LOP_ANDK,
LOP_ORK,
// CONCAT: concatenate all strings between B and C (inclusive) and put the result into A
// A: target register
// B: source register start
// C: source register end
LOP_CONCAT,
// NOT, MINUS, LENGTH: compute unary operation for source register and put the result into target register
// A: target register
// B: source register
LOP_NOT,
LOP_MINUS,
LOP_LENGTH,
// NEWTABLE: create table in target register
// A: target register
// B: table size, stored as 0 for v=0 and ceil(log2(v))+1 for v!=0
// AUX: array size
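// e.g. for B (illustrative): v=1 stores 1, v=5 stores ceil(log2(5))+1 = 4, v=16 stores 5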
LOP_NEWTABLE,
// DUPTABLE: duplicate table using the constant table template to target register
// A: target register
// D: constant table index (0..32767)
LOP_DUPTABLE,
// SETLIST: set a list of values to table in target register
// A: target register
// B: source register start
// C: value count + 1, or 0 to use all values up to top (MULTRET)
// AUX: table index to start from
LOP_SETLIST,
// FORNPREP: prepare a numeric for loop, jump over the loop if first iteration doesn't need to run
// A: target register; numeric for loops assume a register layout [limit, step, index, variable]
// D: jump offset (-32768..32767)
// limit/step are immutable, index isn't visible to user code since it's copied into variable
LOP_FORNPREP,
// FORNLOOP: adjust loop variables for one iteration, jump back to the loop header if loop needs to continue
// A: target register; see FORNPREP for register layout
// D: jump offset (-32768..32767)
LOP_FORNLOOP,
// FORGLOOP: adjust loop variables for one iteration of a generic for loop, jump back to the loop header if loop needs to continue
// A: target register; generic for loops assume a register layout [generator, state, index, variables...]
// D: jump offset (-32768..32767)
// AUX: variable count (1..255) in the low 8 bits, high bit indicates whether to use ipairs-style traversal in the fast path
// loop variables are adjusted by calling generator(state, index) and expecting it to return a tuple that's copied to the user variables
// the first variable is then copied into index; generator/state are immutable, index isn't visible to user code
LOP_FORGLOOP,
// FORGPREP_INEXT: prepare FORGLOOP with 2 output variables (no AUX encoding), assuming generator is luaB_inext, and jump to FORGLOOP
// A: target register (see FORGLOOP for register layout)
LOP_FORGPREP_INEXT,
// removed in v3
LOP_DEP_FORGLOOP_INEXT,
// FORGPREP_NEXT: prepare FORGLOOP with 2 output variables (no AUX encoding), assuming generator is luaB_next, and jump to FORGLOOP
// A: target register (see FORGLOOP for register layout)
LOP_FORGPREP_NEXT,
// NATIVECALL: start executing new function in native code
// this is a pseudo-instruction that is never emitted by the bytecode compiler, but can be constructed at runtime to accelerate native code dispatch
LOP_NATIVECALL,
// GETVARARGS: copy variables into the target register from vararg storage for current function
// A: target register
// B: variable count + 1, or 0 to copy all variables and adjust top (MULTRET)
LOP_GETVARARGS,
// DUPCLOSURE: create closure from a pre-created function object (reusing it unless environments diverge)
// A: target register
// D: constant table index (0..32767)
LOP_DUPCLOSURE,
// PREPVARARGS: prepare stack for variadic functions so that GETVARARGS works correctly
// A: number of fixed arguments
LOP_PREPVARARGS,
// LOADKX: sets register to an entry from the constant table from the proto (number/string)
// A: target register
// AUX: constant table index
LOP_LOADKX,
// JUMPX: jumps to the target offset; like JUMPBACK, supports interruption
// E: jump offset (-2^23..2^23; 0 means "next instruction" aka "don't jump")
LOP_JUMPX,
// FASTCALL: perform a fast call of a built-in function
// A: builtin function id (see LuauBuiltinFunction)
// C: jump offset to get to following CALL
// FASTCALL is followed by one of (GETIMPORT, MOVE, GETUPVAL) instructions and by CALL instruction
// This is necessary so that if FASTCALL can't perform the call inline, it can continue normal execution
// If FASTCALL *can* perform the call, it jumps over the instructions *and* over the next CALL
// Note that FASTCALL will read the actual call arguments, such as argument/result registers and counts, from the CALL instruction
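// An illustrative shape of the sequence (a sketch, not actual compiler output):
//   FASTCALL  <builtin id>, <offset to the CALL below>
//   GETIMPORT <function reg>, <import>   ; slow path only: load the real function object
//   CALL      <function reg>, ...        ; also skipped when the fast path succeeds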
LOP_FASTCALL,
// COVERAGE: update coverage information stored in the instruction
// E: hit count for the instruction (0..2^23-1)
// The hit count is incremented by VM every time the instruction is executed, and saturates at 2^23-1
LOP_COVERAGE,
// CAPTURE: capture a local or an upvalue as an upvalue into a newly created closure; only valid after NEWCLOSURE
// A: capture type, see LuauCaptureType
// B: source register (for VAL/REF) or upvalue index (for UPVAL/UPREF)
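// An illustrative pairing with NEWCLOSURE (a sketch, not actual compiler output):
//   NEWCLOSURE <target reg>, <child proto index>
//   CAPTURE    LCT_VAL, <register of a local>   ; LCT_REF would capture the same local by reference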
LOP_CAPTURE,
// removed in v3
LOP_DEP_JUMPIFEQK,
LOP_DEP_JUMPIFNOTEQK,
// FASTCALL1: perform a fast call of a built-in function using 1 register argument
// A: builtin function id (see LuauBuiltinFunction)
// B: source argument register
// C: jump offset to get to following CALL
LOP_FASTCALL1,
// FASTCALL2: perform a fast call of a built-in function using 2 register arguments
// A: builtin function id (see LuauBuiltinFunction)
// B: source argument register
// C: jump offset to get to following CALL
// AUX: source register 2 in least-significant byte
LOP_FASTCALL2,
// FASTCALL2K: perform a fast call of a built-in function using 1 register argument and 1 constant argument
// A: builtin function id (see LuauBuiltinFunction)
// B: source argument register
// C: jump offset to get to following CALL
// AUX: constant index
LOP_FASTCALL2K,
// FORGPREP: prepare loop variables for a generic for loop, jump to the loop backedge unconditionally
// A: target register; generic for loops assume a register layout [generator, state, index, variables...]
// D: jump offset (-32768..32767)
LOP_FORGPREP,
// JUMPXEQKNIL, JUMPXEQKB: jumps to target offset if the comparison with constant is true (or false, see AUX)
// A: source register 1
// D: jump offset (-32768..32767; 1 means "next instruction" aka "don't jump")
// AUX: constant value (for boolean) in low bit, NOT flag (that flips comparison result) in high bit
LOP_JUMPXEQKNIL,
LOP_JUMPXEQKB,
// JUMPXEQKN, JUMPXEQKS: jumps to target offset if the comparison with constant is true (or false, see AUX)
// A: source register 1
// D: jump offset (-32768..32767; 1 means "next instruction" aka "don't jump")
// AUX: constant table index in low 24 bits, NOT flag (that flips comparison result) in high bit
LOP_JUMPXEQKN,
LOP_JUMPXEQKS,
// Enum entry for number of opcodes, not a valid opcode by itself!
LOP__COUNT
};
// Bytecode instruction header: it's always a 32-bit integer, with low byte (first byte in little endian) containing the opcode
// Some instruction types require more data and have more 32-bit integers following the header
#define LUAU_INSN_OP(insn) ((insn) & 0xff)
// ABC encoding: three 8-bit values, containing registers or small numbers
#define LUAU_INSN_A(insn) (((insn) >> 8) & 0xff)
#define LUAU_INSN_B(insn) (((insn) >> 16) & 0xff)
#define LUAU_INSN_C(insn) (((insn) >> 24) & 0xff)
// AD encoding: one 8-bit value, one signed 16-bit value
#define LUAU_INSN_D(insn) (int32_t(insn) >> 16)
// E encoding: one signed 24-bit value
#define LUAU_INSN_E(insn) (int32_t(insn) >> 8)
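// A decoding sketch using the macros above (variable names are illustrative):
//   uint32_t insn = code[pc];
//   switch (LUAU_INSN_OP(insn))
//   {
//   case LOP_MOVE: // ABC encoding: copy source register B into target register A
//       regs[LUAU_INSN_A(insn)] = regs[LUAU_INSN_B(insn)];
//       break;
//   case LOP_JUMP: // AD encoding: D is a signed 16-bit jump offset in words
//       pc += LUAU_INSN_D(insn);
//       break;
//   }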
// Bytecode tags, used internally for bytecode encoded as a string
enum LuauBytecodeTag
{
// Bytecode version; runtime supports [MIN, MAX], compiler emits TARGET by default but may emit a higher version when flags are enabled
LBC_VERSION_MIN = 3,
LBC_VERSION_MAX = 4,
LBC_VERSION_TARGET = 3,
// Type encoding version
LBC_TYPE_VERSION = 1,
// Types of constant table entries
LBC_CONSTANT_NIL = 0,
LBC_CONSTANT_BOOLEAN,
LBC_CONSTANT_NUMBER,
LBC_CONSTANT_STRING,
LBC_CONSTANT_IMPORT,
LBC_CONSTANT_TABLE,
LBC_CONSTANT_CLOSURE,
};
// Type table tags
enum LuauBytecodeEncodedType
{
LBC_TYPE_NIL = 0,
LBC_TYPE_BOOLEAN,
LBC_TYPE_NUMBER,
LBC_TYPE_STRING,
LBC_TYPE_TABLE,
LBC_TYPE_FUNCTION,
LBC_TYPE_THREAD,
LBC_TYPE_USERDATA,
LBC_TYPE_VECTOR,
LBC_TYPE_ANY = 15,
LBC_TYPE_OPTIONAL_BIT = 1 << 7,
LBC_TYPE_INVALID = 256,
};
// Builtin function ids, used in LOP_FASTCALL
enum LuauBuiltinFunction
{
LBF_NONE = 0,
// assert()
LBF_ASSERT,
// math.
LBF_MATH_ABS,
LBF_MATH_ACOS,
LBF_MATH_ASIN,
LBF_MATH_ATAN2,
LBF_MATH_ATAN,
LBF_MATH_CEIL,
LBF_MATH_COSH,
LBF_MATH_COS,
LBF_MATH_DEG,
LBF_MATH_EXP,
LBF_MATH_FLOOR,
LBF_MATH_FMOD,
LBF_MATH_FREXP,
LBF_MATH_LDEXP,
LBF_MATH_LOG10,
LBF_MATH_LOG,
LBF_MATH_MAX,
LBF_MATH_MIN,
LBF_MATH_MODF,
LBF_MATH_POW,
LBF_MATH_RAD,
LBF_MATH_SINH,
LBF_MATH_SIN,
LBF_MATH_SQRT,
LBF_MATH_TANH,
LBF_MATH_TAN,
// bit32.
LBF_BIT32_ARSHIFT,
LBF_BIT32_BAND,
LBF_BIT32_BNOT,
LBF_BIT32_BOR,
LBF_BIT32_BXOR,
LBF_BIT32_BTEST,
LBF_BIT32_EXTRACT,
LBF_BIT32_LROTATE,
LBF_BIT32_LSHIFT,
LBF_BIT32_REPLACE,
LBF_BIT32_RROTATE,
LBF_BIT32_RSHIFT,
// type()
LBF_TYPE,
// string.
LBF_STRING_BYTE,
LBF_STRING_CHAR,
LBF_STRING_LEN,
// typeof()
LBF_TYPEOF,
// string.
LBF_STRING_SUB,
// math.
LBF_MATH_CLAMP,
LBF_MATH_SIGN,
LBF_MATH_ROUND,
// raw*
LBF_RAWSET,
LBF_RAWGET,
LBF_RAWEQUAL,
// table.
LBF_TABLE_INSERT,
LBF_TABLE_UNPACK,
// vector ctor
LBF_VECTOR,
// bit32.count
LBF_BIT32_COUNTLZ,
LBF_BIT32_COUNTRZ,
// select(_, ...)
LBF_SELECT_VARARG,
// rawlen
LBF_RAWLEN,
// bit32.extract(_, k, k)
LBF_BIT32_EXTRACTK,
// get/setmetatable
LBF_GETMETATABLE,
LBF_SETMETATABLE,
};
// Capture type, used in LOP_CAPTURE
enum LuauCaptureType
{
LCT_VAL = 0,
LCT_REF,
LCT_UPVAL,
};

View File

@ -1,137 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
// Compiler codegen control macros
#ifdef _MSC_VER
#define LUAU_NORETURN __declspec(noreturn)
#define LUAU_NOINLINE __declspec(noinline)
#define LUAU_FORCEINLINE __forceinline
#define LUAU_LIKELY(x) x
#define LUAU_UNLIKELY(x) x
#define LUAU_UNREACHABLE() __assume(false)
#define LUAU_DEBUGBREAK() __debugbreak()
#else
#define LUAU_NORETURN __attribute__((__noreturn__))
#define LUAU_NOINLINE __attribute__((noinline))
#define LUAU_FORCEINLINE inline __attribute__((always_inline))
#define LUAU_LIKELY(x) __builtin_expect(x, 1)
#define LUAU_UNLIKELY(x) __builtin_expect(x, 0)
#define LUAU_UNREACHABLE() __builtin_unreachable()
#define LUAU_DEBUGBREAK() __builtin_trap()
#endif
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define LUAU_BIG_ENDIAN
#endif
namespace Luau
{
using AssertHandler = int (*)(const char* expression, const char* file, int line, const char* function);
inline AssertHandler& assertHandler()
{
static AssertHandler handler = nullptr;
return handler;
}
// We want 'inline' to correctly link this function declared in the header
// But we also want to prevent the compiler from inlining this function when optimization and assertions are enabled together
// The reason is that compilation times can increase significantly in such a configuration
LUAU_NOINLINE inline int assertCallHandler(const char* expression, const char* file, int line, const char* function)
{
if (AssertHandler handler = assertHandler())
return handler(expression, file, line, function);
return 1;
}
} // namespace Luau
#if !defined(NDEBUG) || defined(LUAU_ENABLE_ASSERT)
#define LUAU_ASSERT(expr) ((void)(!!(expr) || (Luau::assertCallHandler(#expr, __FILE__, __LINE__, __FUNCTION__) && (LUAU_DEBUGBREAK(), 0))))
#define LUAU_ASSERTENABLED
#else
#define LUAU_ASSERT(expr) (void)sizeof(!!(expr))
#endif
namespace Luau
{
template<typename T>
struct FValue
{
static FValue* list;
T value;
bool dynamic;
const char* name;
FValue* next;
FValue(const char* name, T def, bool dynamic)
: value(def)
, dynamic(dynamic)
, name(name)
, next(list)
{
list = this;
}
operator T() const
{
return value;
}
};
template<typename T>
FValue<T>* FValue<T>::list = nullptr;
} // namespace Luau
#define LUAU_FASTFLAG(flag) \
namespace FFlag \
{ \
extern Luau::FValue<bool> flag; \
}
#define LUAU_FASTFLAGVARIABLE(flag, def) \
namespace FFlag \
{ \
Luau::FValue<bool> flag(#flag, def, false); \
}
#define LUAU_FASTINT(flag) \
namespace FInt \
{ \
extern Luau::FValue<int> flag; \
}
#define LUAU_FASTINTVARIABLE(flag, def) \
namespace FInt \
{ \
Luau::FValue<int> flag(#flag, def, false); \
}
#define LUAU_DYNAMIC_FASTFLAG(flag) \
namespace DFFlag \
{ \
extern Luau::FValue<bool> flag; \
}
#define LUAU_DYNAMIC_FASTFLAGVARIABLE(flag, def) \
namespace DFFlag \
{ \
Luau::FValue<bool> flag(#flag, def, true); \
}
#define LUAU_DYNAMIC_FASTINT(flag) \
namespace DFInt \
{ \
extern Luau::FValue<int> flag; \
}
#define LUAU_DYNAMIC_FASTINTVARIABLE(flag, def) \
namespace DFInt \
{ \
Luau::FValue<int> flag(#flag, def, true); \
}
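// Illustrative usage (the flag name is hypothetical):
//   LUAU_FASTFLAGVARIABLE(LuauExampleChange, false) // defined in exactly one translation unit
//   LUAU_FASTFLAG(LuauExampleChange)                // declared wherever the flag is consumed
//   if (FFlag::LuauExampleChange)
//       ; // new behavior gated behind the flag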
#if defined(__GNUC__)
#define LUAU_PRINTF_ATTR(fmt, arg) __attribute__((format(printf, fmt, arg)))
#else
#define LUAU_PRINTF_ATTR(fmt, arg)
#endif

View File

@ -1,618 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <functional>
#include <utility>
#include <type_traits>
#include <stdint.h>
namespace Luau
{
struct DenseHashPointer
{
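// Heap pointers are typically aligned, so the low bits carry little entropy; the shifts and xor mix higher bits into the bucket index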
size_t operator()(const void* key) const
{
return (uintptr_t(key) >> 4) ^ (uintptr_t(key) >> 9);
}
};
// Internal implementation of DenseHashSet and DenseHashMap
namespace detail
{
template<typename T>
using DenseHashDefault = std::conditional_t<std::is_pointer_v<T>, DenseHashPointer, std::hash<T>>;
template<typename Key, typename Item, typename MutableItem, typename ItemInterface, typename Hash, typename Eq>
class DenseHashTable
{
public:
class const_iterator;
class iterator;
DenseHashTable(const Key& empty_key, size_t buckets = 0)
: data(nullptr)
, capacity(0)
, count(0)
, empty_key(empty_key)
{
// validate that equality operator is at least somewhat functional
LUAU_ASSERT(eq(empty_key, empty_key));
// buckets has to be power-of-two or zero
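// (a power-of-two capacity lets 'hasher(key) & (capacity - 1)' stand in for a modulo and guarantees the triangular probing below eventually visits every bucket)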
LUAU_ASSERT((buckets & (buckets - 1)) == 0);
if (buckets)
{
data = static_cast<Item*>(::operator new(sizeof(Item) * buckets));
capacity = buckets;
ItemInterface::fill(data, buckets, empty_key);
}
}
~DenseHashTable()
{
if (data)
destroy();
}
DenseHashTable(const DenseHashTable& other)
: data(nullptr)
, capacity(0)
, count(other.count)
, empty_key(other.empty_key)
{
if (other.capacity)
{
data = static_cast<Item*>(::operator new(sizeof(Item) * other.capacity));
for (size_t i = 0; i < other.capacity; ++i)
{
new (&data[i]) Item(other.data[i]);
capacity = i + 1; // if Item copy throws, capacity will record the number of initialized objects for destroy() to clean up
}
}
}
DenseHashTable(DenseHashTable&& other)
: data(other.data)
, capacity(other.capacity)
, count(other.count)
, empty_key(other.empty_key)
{
other.data = nullptr;
other.capacity = 0;
other.count = 0;
}
DenseHashTable& operator=(DenseHashTable&& other)
{
if (this != &other)
{
if (data)
destroy();
data = other.data;
capacity = other.capacity;
count = other.count;
empty_key = other.empty_key;
other.data = nullptr;
other.capacity = 0;
other.count = 0;
}
return *this;
}
DenseHashTable& operator=(const DenseHashTable& other)
{
if (this != &other)
{
DenseHashTable copy(other);
*this = std::move(copy);
}
return *this;
}
void clear()
{
if (count == 0)
return;
if (capacity > 32)
{
destroy();
}
else
{
ItemInterface::destroy(data, capacity);
ItemInterface::fill(data, capacity, empty_key);
}
count = 0;
}
void destroy()
{
ItemInterface::destroy(data, capacity);
::operator delete(data);
data = nullptr;
capacity = 0;
}
Item* insert_unsafe(const Key& key)
{
// It is invalid to insert empty_key into the table since it acts as an "entry does not exist" marker
LUAU_ASSERT(!eq(key, empty_key));
size_t hashmod = capacity - 1;
size_t bucket = hasher(key) & hashmod;
for (size_t probe = 0; probe <= hashmod; ++probe)
{
Item& probe_item = data[bucket];
// Element does not exist, insert here
if (eq(ItemInterface::getKey(probe_item), empty_key))
{
ItemInterface::setKey(probe_item, key);
count++;
return &probe_item;
}
// Element already exists
if (eq(ItemInterface::getKey(probe_item), key))
{
return &probe_item;
}
// Hash collision, quadratic probing
bucket = (bucket + probe + 1) & hashmod;
}
// Hash table is full - this should not happen
LUAU_ASSERT(false);
return NULL;
}
const Item* find(const Key& key) const
{
if (count == 0)
return 0;
if (eq(key, empty_key))
return 0;
size_t hashmod = capacity - 1;
size_t bucket = hasher(key) & hashmod;
for (size_t probe = 0; probe <= hashmod; ++probe)
{
const Item& probe_item = data[bucket];
// Element exists
if (eq(ItemInterface::getKey(probe_item), key))
return &probe_item;
// Element does not exist
if (eq(ItemInterface::getKey(probe_item), empty_key))
return NULL;
// Hash collision, quadratic probing
bucket = (bucket + probe + 1) & hashmod;
}
// Hash table is full - this should not happen
LUAU_ASSERT(false);
return NULL;
}
void rehash()
{
size_t newsize = capacity == 0 ? 16 : capacity * 2;
DenseHashTable newtable(empty_key, newsize);
for (size_t i = 0; i < capacity; ++i)
{
const Key& key = ItemInterface::getKey(data[i]);
if (!eq(key, empty_key))
{
Item* item = newtable.insert_unsafe(key);
*item = std::move(data[i]);
}
}
LUAU_ASSERT(count == newtable.count);
std::swap(data, newtable.data);
std::swap(capacity, newtable.capacity);
}
void rehash_if_full(const Key& key)
{
if (count >= capacity * 3 / 4 && !find(key))
{
rehash();
}
}
const_iterator begin() const
{
size_t start = 0;
while (start < capacity && eq(ItemInterface::getKey(data[start]), empty_key))
start++;
return const_iterator(this, start);
}
const_iterator end() const
{
return const_iterator(this, capacity);
}
iterator begin()
{
size_t start = 0;
while (start < capacity && eq(ItemInterface::getKey(data[start]), empty_key))
start++;
return iterator(this, start);
}
iterator end()
{
return iterator(this, capacity);
}
size_t size() const
{
return count;
}
class const_iterator
{
public:
const_iterator()
: set(nullptr)
, index(0)
{
}
const_iterator(const DenseHashTable<Key, Item, MutableItem, ItemInterface, Hash, Eq>* set, size_t index)
: set(set)
, index(index)
{
}
const Item& operator*() const
{
return set->data[index];
}
const Item* operator->() const
{
return &set->data[index];
}
bool operator==(const const_iterator& other) const
{
return set == other.set && index == other.index;
}
bool operator!=(const const_iterator& other) const
{
return set != other.set || index != other.index;
}
const_iterator& operator++()
{
size_t size = set->capacity;
do
{
index++;
} while (index < size && set->eq(ItemInterface::getKey(set->data[index]), set->empty_key));
return *this;
}
const_iterator operator++(int)
{
const_iterator res = *this;
++*this;
return res;
}
private:
const DenseHashTable<Key, Item, MutableItem, ItemInterface, Hash, Eq>* set;
size_t index;
};
class iterator
{
public:
iterator()
: set(nullptr)
, index(0)
{
}
iterator(DenseHashTable<Key, Item, MutableItem, ItemInterface, Hash, Eq>* set, size_t index)
: set(set)
, index(index)
{
}
MutableItem& operator*() const
{
return *reinterpret_cast<MutableItem*>(&set->data[index]);
}
MutableItem* operator->() const
{
return reinterpret_cast<MutableItem*>(&set->data[index]);
}
bool operator==(const iterator& other) const
{
return set == other.set && index == other.index;
}
bool operator!=(const iterator& other) const
{
return set != other.set || index != other.index;
}
iterator& operator++()
{
size_t size = set->capacity;
do
{
index++;
} while (index < size && set->eq(ItemInterface::getKey(set->data[index]), set->empty_key));
return *this;
}
iterator operator++(int)
{
iterator res = *this;
++*this;
return res;
}
private:
DenseHashTable<Key, Item, MutableItem, ItemInterface, Hash, Eq>* set;
size_t index;
};
private:
Item* data;
size_t capacity;
size_t count;
Key empty_key;
Hash hasher;
Eq eq;
};
template<typename Key>
struct ItemInterfaceSet
{
static const Key& getKey(const Key& item)
{
return item;
}
static void setKey(Key& item, const Key& key)
{
item = key;
}
static void fill(Key* data, size_t count, const Key& key)
{
for (size_t i = 0; i < count; ++i)
new (&data[i]) Key(key);
}
static void destroy(Key* data, size_t count)
{
for (size_t i = 0; i < count; ++i)
data[i].~Key();
}
};
template<typename Key, typename Value>
struct ItemInterfaceMap
{
static const Key& getKey(const std::pair<Key, Value>& item)
{
return item.first;
}
static void setKey(std::pair<Key, Value>& item, const Key& key)
{
item.first = key;
}
static void fill(std::pair<Key, Value>* data, size_t count, const Key& key)
{
for (size_t i = 0; i < count; ++i)
{
new (&data[i].first) Key(key);
new (&data[i].second) Value();
}
}
static void destroy(std::pair<Key, Value>* data, size_t count)
{
for (size_t i = 0; i < count; ++i)
{
data[i].first.~Key();
data[i].second.~Value();
}
}
};
} // namespace detail
// This is a faster alternative to std::unordered_set, but it does not implement the same interface (i.e. it does not support erasing)
template<typename Key, typename Hash = detail::DenseHashDefault<Key>, typename Eq = std::equal_to<Key>>
class DenseHashSet
{
typedef detail::DenseHashTable<Key, Key, Key, detail::ItemInterfaceSet<Key>, Hash, Eq> Impl;
Impl impl;
public:
typedef typename Impl::const_iterator const_iterator;
typedef typename Impl::iterator iterator;
DenseHashSet(const Key& empty_key, size_t buckets = 0)
: impl(empty_key, buckets)
{
}
void clear()
{
impl.clear();
}
const Key& insert(const Key& key)
{
impl.rehash_if_full(key);
return *impl.insert_unsafe(key);
}
const Key* find(const Key& key) const
{
return impl.find(key);
}
bool contains(const Key& key) const
{
return impl.find(key) != nullptr;
}
size_t size() const
{
return impl.size();
}
bool empty() const
{
return impl.size() == 0;
}
const_iterator begin() const
{
return impl.begin();
}
const_iterator end() const
{
return impl.end();
}
iterator begin()
{
return impl.begin();
}
iterator end()
{
return impl.end();
}
};
// This is a faster alternative to std::unordered_map, but it does not implement the same interface (i.e. it does not support erasing, and
// find() returns a pointer to the value instead of an iterator)
template<typename Key, typename Value, typename Hash = detail::DenseHashDefault<Key>, typename Eq = std::equal_to<Key>>
class DenseHashMap
{
typedef detail::DenseHashTable<Key, std::pair<Key, Value>, std::pair<const Key, Value>, detail::ItemInterfaceMap<Key, Value>, Hash, Eq> Impl;
Impl impl;
public:
typedef typename Impl::const_iterator const_iterator;
typedef typename Impl::iterator iterator;
DenseHashMap(const Key& empty_key, size_t buckets = 0)
: impl(empty_key, buckets)
{
}
void clear()
{
impl.clear();
}
// Note: this reference is invalidated by any insert operation (i.e. operator[])
Value& operator[](const Key& key)
{
impl.rehash_if_full(key);
return impl.insert_unsafe(key)->second;
}
// Note: this pointer is invalidated by any insert operation (i.e. operator[])
const Value* find(const Key& key) const
{
const std::pair<Key, Value>* result = impl.find(key);
return result ? &result->second : nullptr;
}
// Note: this pointer is invalidated by any insert operation (i.e. operator[])
Value* find(const Key& key)
{
const std::pair<Key, Value>* result = impl.find(key);
return result ? const_cast<Value*>(&result->second) : nullptr;
}
bool contains(const Key& key) const
{
return impl.find(key) != nullptr;
}
size_t size() const
{
return impl.size();
}
bool empty() const
{
return impl.size() == 0;
}
const_iterator begin() const
{
return impl.begin();
}
const_iterator end() const
{
return impl.end();
}
iterator begin()
{
return impl.begin();
}
iterator end()
{
return impl.end();
}
};
} // namespace Luau
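// A minimal usage sketch of the two containers (illustrative, not part of the original
// header); the only unusual requirement is the "empty key" sentinel, a key value that
// real entries will never use:
static void denseHashExample()
{
Luau::DenseHashSet<int> seen(/* empty_key= */ -1);
seen.insert(42);
LUAU_ASSERT(seen.contains(42));
Luau::DenseHashMap<int, const char*> names(/* empty_key= */ -1);
names[1] = "one"; // operator[] inserts a default-constructed value on demand
const char** entry = names.find(1); // invalidated by any later insert
LUAU_ASSERT(entry && *entry);
}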

View File

@@ -1,28 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <string.h>
namespace Luau
{
inline bool isFlagExperimental(const char* flag)
{
// Flags in this list are disabled by default in various command-line tools. They may have behavior that is not fully final,
// or critical bugs that were found after the code was submitted.
static const char* const kList[] = {
"LuauInstantiateInSubtyping", // requires some fixes to lua-apps code
"LuauTypecheckTypeguards", // requires some fixes to lua-apps code (CLI-67030)
"LuauTinyControlFlowAnalysis", // waiting for updates to packages depended by internal builtin plugins
// makes sure we always have at least one entry
nullptr,
};
for (const char* item : kList)
if (item && strcmp(item, flag) == 0)
return true;
return false;
}
} // namespace Luau

View File

@@ -1,285 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Bytecode.h"
#include "Luau/DenseHash.h"
#include "Luau/StringUtils.h"
#include <string>
namespace Luau
{
class BytecodeEncoder
{
public:
virtual ~BytecodeEncoder() {}
virtual uint8_t encodeOp(uint8_t op) = 0;
};
class BytecodeBuilder
{
public:
// BytecodeBuilder does *not* copy the data passed via StringRef; instead, it keeps the ref around until finalize()
// Please be careful with the lifetime of the data that's being passed because of this.
// The safe and correct pattern is to only build StringRefs out of pieces of AST (AstName or AstArray<>) that are backed by AstAllocator.
// Note that you must finalize() the builder before the Allocator backing the Ast is destroyed.
struct StringRef
{
// To construct a StringRef, use sref() from Compiler.cpp.
const char* data = nullptr;
size_t length = 0;
bool operator==(const StringRef& other) const;
};
struct TableShape
{
static const unsigned int kMaxLength = 32;
int32_t keys[kMaxLength];
unsigned int length = 0;
bool operator==(const TableShape& other) const;
};
BytecodeBuilder(BytecodeEncoder* encoder = nullptr);
uint32_t beginFunction(uint8_t numparams, bool isvararg = false);
void endFunction(uint8_t maxstacksize, uint8_t numupvalues);
void setMainFunction(uint32_t fid);
int32_t addConstantNil();
int32_t addConstantBoolean(bool value);
int32_t addConstantNumber(double value);
int32_t addConstantString(StringRef value);
int32_t addImport(uint32_t iid);
int32_t addConstantTable(const TableShape& shape);
int32_t addConstantClosure(uint32_t fid);
int16_t addChildFunction(uint32_t fid);
void emitABC(LuauOpcode op, uint8_t a, uint8_t b, uint8_t c);
void emitAD(LuauOpcode op, uint8_t a, int16_t d);
void emitE(LuauOpcode op, int32_t e);
void emitAux(uint32_t aux);
size_t emitLabel();
[[nodiscard]] bool patchJumpD(size_t jumpLabel, size_t targetLabel);
[[nodiscard]] bool patchSkipC(size_t jumpLabel, size_t targetLabel);
void foldJumps();
void expandJumps();
void setFunctionTypeInfo(std::string value);
void setDebugFunctionName(StringRef name);
void setDebugFunctionLineDefined(int line);
void setDebugLine(int line);
void pushDebugLocal(StringRef name, uint8_t reg, uint32_t startpc, uint32_t endpc);
void pushDebugUpval(StringRef name);
size_t getInstructionCount() const;
uint32_t getDebugPC() const;
void addDebugRemark(const char* format, ...) LUAU_PRINTF_ATTR(2, 3);
void finalize();
enum DumpFlags
{
Dump_Code = 1 << 0,
Dump_Lines = 1 << 1,
Dump_Source = 1 << 2,
Dump_Locals = 1 << 3,
Dump_Remarks = 1 << 4,
};
void setDumpFlags(uint32_t flags)
{
dumpFlags = flags;
dumpFunctionPtr = &BytecodeBuilder::dumpCurrentFunction;
}
void setDumpSource(const std::string& source);
bool needsDebugRemarks() const
{
return (dumpFlags & Dump_Remarks) != 0;
}
const std::string& getBytecode() const
{
LUAU_ASSERT(!bytecode.empty()); // did you forget to call finalize?
return bytecode;
}
std::string dumpFunction(uint32_t id) const;
std::string dumpEverything() const;
std::string dumpSourceRemarks() const;
std::string dumpTypeInfo() const;
void annotateInstruction(std::string& result, uint32_t fid, uint32_t instpos) const;
static uint32_t getImportId(int32_t id0);
static uint32_t getImportId(int32_t id0, int32_t id1);
static uint32_t getImportId(int32_t id0, int32_t id1, int32_t id2);
static int decomposeImportId(uint32_t ids, int32_t& id0, int32_t& id1, int32_t& id2);
static uint32_t getStringHash(StringRef key);
static std::string getError(const std::string& message);
static uint8_t getVersion();
static uint8_t getTypeEncodingVersion();
private:
struct Constant
{
enum Type
{
Type_Nil,
Type_Boolean,
Type_Number,
Type_String,
Type_Import,
Type_Table,
Type_Closure,
};
Type type;
union
{
bool valueBoolean;
double valueNumber;
unsigned int valueString; // index into string table
uint32_t valueImport; // 10-10-10-2 encoded import id
uint32_t valueTable; // index into tableShapes[]
uint32_t valueClosure; // index of function in global list
};
};
struct ConstantKey
{
Constant::Type type;
// Note: this stores value* from Constant; when type is Number_Double, this stores the same bits as double does but in uint64_t.
uint64_t value;
bool operator==(const ConstantKey& key) const
{
return type == key.type && value == key.value;
}
};
struct Function
{
std::string data;
uint8_t maxstacksize = 0;
uint8_t numparams = 0;
uint8_t numupvalues = 0;
bool isvararg = false;
unsigned int debugname = 0;
int debuglinedefined = 0;
std::string dump;
std::string dumpname;
std::vector<int> dumpinstoffs;
std::string typeinfo;
};
struct DebugLocal
{
unsigned int name;
uint8_t reg;
uint32_t startpc;
uint32_t endpc;
};
struct DebugUpval
{
unsigned int name;
};
struct Jump
{
uint32_t source;
uint32_t target;
};
struct StringRefHash
{
size_t operator()(const StringRef& v) const;
};
struct ConstantKeyHash
{
size_t operator()(const ConstantKey& key) const;
};
struct TableShapeHash
{
size_t operator()(const TableShape& v) const;
};
std::vector<Function> functions;
uint32_t currentFunction = ~0u;
uint32_t mainFunction = ~0u;
std::vector<uint32_t> insns;
std::vector<int> lines;
std::vector<Constant> constants;
std::vector<uint32_t> protos;
std::vector<Jump> jumps;
std::vector<TableShape> tableShapes;
bool hasLongJumps = false;
DenseHashMap<ConstantKey, int32_t, ConstantKeyHash> constantMap;
DenseHashMap<TableShape, int32_t, TableShapeHash> tableShapeMap;
DenseHashMap<uint32_t, int16_t> protoMap;
int debugLine = 0;
std::vector<DebugLocal> debugLocals;
std::vector<DebugUpval> debugUpvals;
DenseHashMap<StringRef, unsigned int, StringRefHash> stringTable;
std::vector<StringRef> debugStrings;
std::vector<std::pair<uint32_t, uint32_t>> debugRemarks;
std::string debugRemarkBuffer;
BytecodeEncoder* encoder = nullptr;
std::string bytecode;
uint32_t dumpFlags = 0;
std::vector<std::string> dumpSource;
std::vector<std::pair<int, std::string>> dumpRemarks;
std::string (BytecodeBuilder::*dumpFunctionPtr)(std::vector<int>&) const = nullptr;
void validate() const;
void validateInstructions() const;
void validateVariadic() const;
std::string dumpCurrentFunction(std::vector<int>& dumpinstoffs) const;
void dumpConstant(std::string& result, int k) const;
void dumpInstruction(const uint32_t* opcode, std::string& output, int targetLabel) const;
void writeFunction(std::string& ss, uint32_t id) const;
void writeLineInfo(std::string& ss) const;
void writeStringTable(std::string& ss) const;
int32_t addConstant(const ConstantKey& key, const Constant& value);
unsigned int addStringTableEntry(StringRef value);
};
} // namespace Luau
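// A minimal end-to-end sketch (illustrative, not part of the original header); it
// assumes LOP_RETURN from Luau/Bytecode.h, whose operand B encodes the number of
// returned values plus one:
static std::string buildEmptyChunk()
{
Luau::BytecodeBuilder bcb;
uint32_t fid = bcb.beginFunction(/* numparams= */ 0);
bcb.emitABC(LOP_RETURN, /* a= */ 0, /* b= */ 1, /* c= */ 0); // return zero values
bcb.endFunction(/* maxstacksize= */ 1, /* numupvalues= */ 0);
bcb.setMainFunction(fid);
bcb.finalize(); // must run before getBytecode(), and before the AST allocator dies
return bcb.getBytecode();
}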

View File

@@ -1,68 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/ParseOptions.h"
#include "Luau/Location.h"
#include "Luau/StringUtils.h"
#include "Luau/Common.h"
namespace Luau
{
class AstNameTable;
struct ParseResult;
class BytecodeBuilder;
class BytecodeEncoder;
// Note: this structure is duplicated in luacode.h, don't forget to change these in sync!
struct CompileOptions
{
// 0 - no optimization
// 1 - baseline optimization level that doesn't prevent debuggability
// 2 - includes optimizations that harm debuggability such as inlining
int optimizationLevel = 1;
// 0 - no debugging support
// 1 - line info & function names only; sufficient for backtraces
// 2 - full debug info with local & upvalue names; necessary for debugger
int debugLevel = 1;
// 0 - no code coverage support
// 1 - statement coverage
// 2 - statement and expression coverage (verbose)
int coverageLevel = 0;
// global builtin to construct vectors; disabled by default
const char* vectorLib = nullptr;
const char* vectorCtor = nullptr;
// null-terminated array of globals that are mutable; disables the import optimization for fields accessed through these
const char** mutableGlobals = nullptr;
};
class CompileError : public std::exception
{
public:
CompileError(const Location& location, const std::string& message);
virtual ~CompileError() throw();
virtual const char* what() const throw();
const Location& getLocation() const;
static LUAU_NORETURN void raise(const Location& location, const char* format, ...) LUAU_PRINTF_ATTR(2, 3);
private:
Location location;
std::string message;
};
// compiles source into the given bytecode builder, using either a pre-parsed AST or by parsing it from source; throws on errors
void compileOrThrow(BytecodeBuilder& bytecode, const ParseResult& parseResult, const AstNameTable& names, const CompileOptions& options = {});
void compileOrThrow(BytecodeBuilder& bytecode, const std::string& source, const CompileOptions& options = {}, const ParseOptions& parseOptions = {});
// compiles source into a bytecode blob that either contains the valid bytecode or an encoded error that luau_load can decode
std::string compile(
const std::string& source, const CompileOptions& options = {}, const ParseOptions& parseOptions = {}, BytecodeEncoder* encoder = nullptr);
} // namespace Luau
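// A minimal usage sketch (illustrative, not part of the original header): compile()
// never throws and instead encodes failures into the returned blob, while
// compileOrThrow() raises CompileError on malformed input:
static std::string compileChunk()
{
Luau::CompileOptions options;
options.optimizationLevel = 2; // enables inlining and other -O2 passes
return Luau::compile("return 1 + 2", options);
}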

View File

@@ -1,39 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stddef.h>
// Can be used to reconfigure visibility/exports for public APIs
#ifndef LUACODE_API
#define LUACODE_API extern
#endif
typedef struct lua_CompileOptions lua_CompileOptions;
struct lua_CompileOptions
{
// 0 - no optimization
// 1 - baseline optimization level that doesn't prevent debuggability
// 2 - includes optimizations that harm debuggability such as inlining
int optimizationLevel; // default=1
// 0 - no debugging support
// 1 - line info & function names only; sufficient for backtraces
// 2 - full debug info with local & upvalue names; necessary for debugger
int debugLevel; // default=1
// 0 - no code coverage support
// 1 - statement coverage
// 2 - statement and expression coverage (verbose)
int coverageLevel; // default=0
// global builtin to construct vectors; disabled by default
const char* vectorLib;
const char* vectorCtor;
// null-terminated array of globals that are mutable; disables the import optimization for fields accessed through these
const char** mutableGlobals;
};
// compile source to bytecode; when source compilation fails, the resulting bytecode contains the encoded error. use free() to destroy the result
LUACODE_API char* luau_compile(const char* source, size_t size, lua_CompileOptions* options, size_t* outsize);
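/* A minimal usage sketch (illustrative, not part of the original header; assumes
<string.h> and <stdlib.h> for strlen/free). Passing a NULL options pointer selects
the defaults documented above; the caller releases the returned buffer with free(): */
static char* compile_chunk(const char* source, size_t* outsize)
{
return luau_compile(source, strlen(source), /* options= */ NULL, outsize);
}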

View File

@@ -1,464 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "BuiltinFolding.h"
#include "Luau/Bytecode.h"
#include <math.h>
namespace Luau
{
namespace Compile
{
const double kRadDeg = 3.14159265358979323846 / 180.0;
static Constant cvar()
{
return Constant();
}
static Constant cbool(bool v)
{
Constant res = {Constant::Type_Boolean};
res.valueBoolean = v;
return res;
}
static Constant cnum(double v)
{
Constant res = {Constant::Type_Number};
res.valueNumber = v;
return res;
}
static Constant cstring(const char* v)
{
Constant res = {Constant::Type_String};
res.stringLength = unsigned(strlen(v));
res.valueString = v;
return res;
}
static Constant ctype(const Constant& c)
{
LUAU_ASSERT(c.type != Constant::Type_Unknown);
switch (c.type)
{
case Constant::Type_Nil:
return cstring("nil");
case Constant::Type_Boolean:
return cstring("boolean");
case Constant::Type_Number:
return cstring("number");
case Constant::Type_String:
return cstring("string");
default:
LUAU_ASSERT(!"Unsupported constant type");
return cvar();
}
}
static uint32_t bit32(double v)
{
// convert through signed 64-bit integer to match runtime behavior and gracefully truncate negative integers
return uint32_t(int64_t(v));
}
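// e.g. bit32(-1.0) == 0xffffffff (int64_t(-1.0) == -1, truncated to uint32_t),
// while bit32(4294967296.0) == 0 because 2^32 wraps around in the 32-bit result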
Constant foldBuiltin(int bfid, const Constant* args, size_t count)
{
switch (bfid)
{
case LBF_MATH_ABS:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(fabs(args[0].valueNumber));
break;
case LBF_MATH_ACOS:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(acos(args[0].valueNumber));
break;
case LBF_MATH_ASIN:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(asin(args[0].valueNumber));
break;
case LBF_MATH_ATAN2:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
return cnum(atan2(args[0].valueNumber, args[1].valueNumber));
break;
case LBF_MATH_ATAN:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(atan(args[0].valueNumber));
break;
case LBF_MATH_CEIL:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(ceil(args[0].valueNumber));
break;
case LBF_MATH_COSH:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(cosh(args[0].valueNumber));
break;
case LBF_MATH_COS:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(cos(args[0].valueNumber));
break;
case LBF_MATH_DEG:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(args[0].valueNumber / kRadDeg);
break;
case LBF_MATH_EXP:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(exp(args[0].valueNumber));
break;
case LBF_MATH_FLOOR:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(floor(args[0].valueNumber));
break;
case LBF_MATH_FMOD:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
return cnum(fmod(args[0].valueNumber, args[1].valueNumber));
break;
// Note: FREXP isn't folded since it returns multiple values
case LBF_MATH_LDEXP:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
return cnum(ldexp(args[0].valueNumber, int(args[1].valueNumber)));
break;
case LBF_MATH_LOG10:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(log10(args[0].valueNumber));
break;
case LBF_MATH_LOG:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(log(args[0].valueNumber));
else if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
{
if (args[1].valueNumber == 2.0)
return cnum(log2(args[0].valueNumber));
else if (args[1].valueNumber == 10.0)
return cnum(log10(args[0].valueNumber));
else
return cnum(log(args[0].valueNumber) / log(args[1].valueNumber));
}
break;
case LBF_MATH_MAX:
if (count >= 1 && args[0].type == Constant::Type_Number)
{
double r = args[0].valueNumber;
for (size_t i = 1; i < count; ++i)
{
if (args[i].type != Constant::Type_Number)
return cvar();
double a = args[i].valueNumber;
r = (a > r) ? a : r;
}
return cnum(r);
}
break;
case LBF_MATH_MIN:
if (count >= 1 && args[0].type == Constant::Type_Number)
{
double r = args[0].valueNumber;
for (size_t i = 1; i < count; ++i)
{
if (args[i].type != Constant::Type_Number)
return cvar();
double a = args[i].valueNumber;
r = (a < r) ? a : r;
}
return cnum(r);
}
break;
// Note: MODF isn't folded since it returns multiple values
case LBF_MATH_POW:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
return cnum(pow(args[0].valueNumber, args[1].valueNumber));
break;
case LBF_MATH_RAD:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(args[0].valueNumber * kRadDeg);
break;
case LBF_MATH_SINH:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(sinh(args[0].valueNumber));
break;
case LBF_MATH_SIN:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(sin(args[0].valueNumber));
break;
case LBF_MATH_SQRT:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(sqrt(args[0].valueNumber));
break;
case LBF_MATH_TANH:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(tanh(args[0].valueNumber));
break;
case LBF_MATH_TAN:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(tan(args[0].valueNumber));
break;
case LBF_BIT32_ARSHIFT:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
{
uint32_t u = bit32(args[0].valueNumber);
int s = int(args[1].valueNumber);
if (unsigned(s) < 32)
return cnum(double(uint32_t(int32_t(u) >> s)));
}
break;
case LBF_BIT32_BAND:
if (count >= 1 && args[0].type == Constant::Type_Number)
{
uint32_t r = bit32(args[0].valueNumber);
for (size_t i = 1; i < count; ++i)
{
if (args[i].type != Constant::Type_Number)
return cvar();
r &= bit32(args[i].valueNumber);
}
return cnum(double(r));
}
break;
case LBF_BIT32_BNOT:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(double(uint32_t(~bit32(args[0].valueNumber))));
break;
case LBF_BIT32_BOR:
if (count >= 1 && args[0].type == Constant::Type_Number)
{
uint32_t r = bit32(args[0].valueNumber);
for (size_t i = 1; i < count; ++i)
{
if (args[i].type != Constant::Type_Number)
return cvar();
r |= bit32(args[i].valueNumber);
}
return cnum(double(r));
}
break;
case LBF_BIT32_BXOR:
if (count >= 1 && args[0].type == Constant::Type_Number)
{
uint32_t r = bit32(args[0].valueNumber);
for (size_t i = 1; i < count; ++i)
{
if (args[i].type != Constant::Type_Number)
return cvar();
r ^= bit32(args[i].valueNumber);
}
return cnum(double(r));
}
break;
case LBF_BIT32_BTEST:
if (count >= 1 && args[0].type == Constant::Type_Number)
{
uint32_t r = bit32(args[0].valueNumber);
for (size_t i = 1; i < count; ++i)
{
if (args[i].type != Constant::Type_Number)
return cvar();
r &= bit32(args[i].valueNumber);
}
return cbool(r != 0);
}
break;
case LBF_BIT32_EXTRACT:
if (count >= 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number &&
(count == 2 || args[2].type == Constant::Type_Number))
{
uint32_t u = bit32(args[0].valueNumber);
int f = int(args[1].valueNumber);
int w = count == 2 ? 1 : int(args[2].valueNumber);
if (f >= 0 && w > 0 && f + w <= 32)
{
uint32_t m = ~(0xfffffffeu << (w - 1));
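// e.g. w == 4: 0xfffffffeu << 3 == 0xfffffff0, so m == 0x0000000f (the w low bits)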
return cnum(double((u >> f) & m));
}
}
break;
case LBF_BIT32_LROTATE:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
{
uint32_t u = bit32(args[0].valueNumber);
int s = int(args[1].valueNumber);
return cnum(double((u << (s & 31)) | (u >> ((32 - s) & 31))));
}
break;
case LBF_BIT32_LSHIFT:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
{
uint32_t u = bit32(args[0].valueNumber);
int s = int(args[1].valueNumber);
if (unsigned(s) < 32)
return cnum(double(u << s));
}
break;
case LBF_BIT32_REPLACE:
if (count >= 3 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number && args[2].type == Constant::Type_Number &&
(count == 3 || args[3].type == Constant::Type_Number))
{
uint32_t n = bit32(args[0].valueNumber);
uint32_t v = bit32(args[1].valueNumber);
int f = int(args[2].valueNumber);
int w = count == 3 ? 1 : int(args[3].valueNumber);
if (f >= 0 && w > 0 && f + w <= 32)
{
uint32_t m = ~(0xfffffffeu << (w - 1));
return cnum(double((n & ~(m << f)) | ((v & m) << f)));
}
}
break;
case LBF_BIT32_RROTATE:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
{
uint32_t u = bit32(args[0].valueNumber);
int s = int(args[1].valueNumber);
return cnum(double((u >> (s & 31)) | (u << ((32 - s) & 31))));
}
break;
case LBF_BIT32_RSHIFT:
if (count == 2 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number)
{
uint32_t u = bit32(args[0].valueNumber);
int s = int(args[1].valueNumber);
if (unsigned(s) < 32)
return cnum(double(u >> s));
}
break;
case LBF_TYPE:
if (count == 1 && args[0].type != Constant::Type_Unknown)
return ctype(args[0]);
break;
case LBF_STRING_BYTE:
if (count == 1 && args[0].type == Constant::Type_String)
{
if (args[0].stringLength > 0)
return cnum(double(uint8_t(args[0].valueString[0])));
}
else if (count == 2 && args[0].type == Constant::Type_String && args[1].type == Constant::Type_Number)
{
int i = int(args[1].valueNumber);
if (i > 0 && unsigned(i) <= args[0].stringLength)
return cnum(double(uint8_t(args[0].valueString[i - 1])));
}
break;
case LBF_STRING_LEN:
if (count == 1 && args[0].type == Constant::Type_String)
return cnum(double(args[0].stringLength));
break;
case LBF_TYPEOF:
if (count == 1 && args[0].type != Constant::Type_Unknown)
return ctype(args[0]);
break;
case LBF_MATH_CLAMP:
if (count == 3 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number && args[2].type == Constant::Type_Number)
{
double min = args[1].valueNumber;
double max = args[2].valueNumber;
if (min <= max)
{
double v = args[0].valueNumber;
v = v < min ? min : v;
v = v > max ? max : v;
return cnum(v);
}
}
break;
case LBF_MATH_SIGN:
if (count == 1 && args[0].type == Constant::Type_Number)
{
double v = args[0].valueNumber;
return cnum(v > 0.0 ? 1.0 : v < 0.0 ? -1.0 : 0.0);
}
break;
case LBF_MATH_ROUND:
if (count == 1 && args[0].type == Constant::Type_Number)
return cnum(round(args[0].valueNumber));
break;
}
return cvar();
}
} // namespace Compile
} // namespace Luau

View File

@@ -1,14 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "ConstantFolding.h"
namespace Luau
{
namespace Compile
{
Constant foldBuiltin(int bfid, const Constant* args, size_t count);
} // namespace Compile
} // namespace Luau

View File

@@ -1,400 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Builtins.h"
#include "Luau/Bytecode.h"
#include "Luau/Compiler.h"
namespace Luau
{
namespace Compile
{
Builtin getBuiltin(AstExpr* node, const DenseHashMap<AstName, Global>& globals, const DenseHashMap<AstLocal*, Variable>& variables)
{
if (AstExprLocal* expr = node->as<AstExprLocal>())
{
const Variable* v = variables.find(expr->local);
return v && !v->written && v->init ? getBuiltin(v->init, globals, variables) : Builtin();
}
else if (AstExprIndexName* expr = node->as<AstExprIndexName>())
{
if (AstExprGlobal* object = expr->expr->as<AstExprGlobal>())
{
return getGlobalState(globals, object->name) == Global::Default ? Builtin{object->name, expr->index} : Builtin();
}
else
{
return Builtin();
}
}
else if (AstExprGlobal* expr = node->as<AstExprGlobal>())
{
return getGlobalState(globals, expr->name) == Global::Default ? Builtin{AstName(), expr->name} : Builtin();
}
else
{
return Builtin();
}
}
static int getBuiltinFunctionId(const Builtin& builtin, const CompileOptions& options)
{
if (builtin.isGlobal("assert"))
return LBF_ASSERT;
if (builtin.isGlobal("type"))
return LBF_TYPE;
if (builtin.isGlobal("typeof"))
return LBF_TYPEOF;
if (builtin.isGlobal("rawset"))
return LBF_RAWSET;
if (builtin.isGlobal("rawget"))
return LBF_RAWGET;
if (builtin.isGlobal("rawequal"))
return LBF_RAWEQUAL;
if (builtin.isGlobal("rawlen"))
return LBF_RAWLEN;
if (builtin.isGlobal("unpack"))
return LBF_TABLE_UNPACK;
if (builtin.isGlobal("select"))
return LBF_SELECT_VARARG;
if (builtin.isGlobal("getmetatable"))
return LBF_GETMETATABLE;
if (builtin.isGlobal("setmetatable"))
return LBF_SETMETATABLE;
if (builtin.object == "math")
{
if (builtin.method == "abs")
return LBF_MATH_ABS;
if (builtin.method == "acos")
return LBF_MATH_ACOS;
if (builtin.method == "asin")
return LBF_MATH_ASIN;
if (builtin.method == "atan2")
return LBF_MATH_ATAN2;
if (builtin.method == "atan")
return LBF_MATH_ATAN;
if (builtin.method == "ceil")
return LBF_MATH_CEIL;
if (builtin.method == "cosh")
return LBF_MATH_COSH;
if (builtin.method == "cos")
return LBF_MATH_COS;
if (builtin.method == "deg")
return LBF_MATH_DEG;
if (builtin.method == "exp")
return LBF_MATH_EXP;
if (builtin.method == "floor")
return LBF_MATH_FLOOR;
if (builtin.method == "fmod")
return LBF_MATH_FMOD;
if (builtin.method == "frexp")
return LBF_MATH_FREXP;
if (builtin.method == "ldexp")
return LBF_MATH_LDEXP;
if (builtin.method == "log10")
return LBF_MATH_LOG10;
if (builtin.method == "log")
return LBF_MATH_LOG;
if (builtin.method == "max")
return LBF_MATH_MAX;
if (builtin.method == "min")
return LBF_MATH_MIN;
if (builtin.method == "modf")
return LBF_MATH_MODF;
if (builtin.method == "pow")
return LBF_MATH_POW;
if (builtin.method == "rad")
return LBF_MATH_RAD;
if (builtin.method == "sinh")
return LBF_MATH_SINH;
if (builtin.method == "sin")
return LBF_MATH_SIN;
if (builtin.method == "sqrt")
return LBF_MATH_SQRT;
if (builtin.method == "tanh")
return LBF_MATH_TANH;
if (builtin.method == "tan")
return LBF_MATH_TAN;
if (builtin.method == "clamp")
return LBF_MATH_CLAMP;
if (builtin.method == "sign")
return LBF_MATH_SIGN;
if (builtin.method == "round")
return LBF_MATH_ROUND;
}
if (builtin.object == "bit32")
{
if (builtin.method == "arshift")
return LBF_BIT32_ARSHIFT;
if (builtin.method == "band")
return LBF_BIT32_BAND;
if (builtin.method == "bnot")
return LBF_BIT32_BNOT;
if (builtin.method == "bor")
return LBF_BIT32_BOR;
if (builtin.method == "bxor")
return LBF_BIT32_BXOR;
if (builtin.method == "btest")
return LBF_BIT32_BTEST;
if (builtin.method == "extract")
return LBF_BIT32_EXTRACT;
if (builtin.method == "lrotate")
return LBF_BIT32_LROTATE;
if (builtin.method == "lshift")
return LBF_BIT32_LSHIFT;
if (builtin.method == "replace")
return LBF_BIT32_REPLACE;
if (builtin.method == "rrotate")
return LBF_BIT32_RROTATE;
if (builtin.method == "rshift")
return LBF_BIT32_RSHIFT;
if (builtin.method == "countlz")
return LBF_BIT32_COUNTLZ;
if (builtin.method == "countrz")
return LBF_BIT32_COUNTRZ;
}
if (builtin.object == "string")
{
if (builtin.method == "byte")
return LBF_STRING_BYTE;
if (builtin.method == "char")
return LBF_STRING_CHAR;
if (builtin.method == "len")
return LBF_STRING_LEN;
if (builtin.method == "sub")
return LBF_STRING_SUB;
}
if (builtin.object == "table")
{
if (builtin.method == "insert")
return LBF_TABLE_INSERT;
if (builtin.method == "unpack")
return LBF_TABLE_UNPACK;
}
if (options.vectorCtor)
{
if (options.vectorLib)
{
if (builtin.isMethod(options.vectorLib, options.vectorCtor))
return LBF_VECTOR;
}
else
{
if (builtin.isGlobal(options.vectorCtor))
return LBF_VECTOR;
}
}
return -1;
}
struct BuiltinVisitor : AstVisitor
{
DenseHashMap<AstExprCall*, int>& result;
const DenseHashMap<AstName, Global>& globals;
const DenseHashMap<AstLocal*, Variable>& variables;
const CompileOptions& options;
BuiltinVisitor(DenseHashMap<AstExprCall*, int>& result, const DenseHashMap<AstName, Global>& globals,
const DenseHashMap<AstLocal*, Variable>& variables, const CompileOptions& options)
: result(result)
, globals(globals)
, variables(variables)
, options(options)
{
}
bool visit(AstExprCall* node) override
{
Builtin builtin = node->self ? Builtin() : getBuiltin(node->func, globals, variables);
if (builtin.empty())
return true;
int bfid = getBuiltinFunctionId(builtin, options);
// getBuiltinFunctionId optimistically assumes all select() calls are builtin, but the LBF_SELECT_VARARG fast path requires the second argument to be a vararg
if (bfid == LBF_SELECT_VARARG && !(node->args.size == 2 && node->args.data[1]->is<AstExprVarargs>()))
bfid = -1;
if (bfid >= 0)
result[node] = bfid;
return true; // propagate to nested calls
}
};
void analyzeBuiltins(DenseHashMap<AstExprCall*, int>& result, const DenseHashMap<AstName, Global>& globals,
const DenseHashMap<AstLocal*, Variable>& variables, const CompileOptions& options, AstNode* root)
{
BuiltinVisitor visitor{result, globals, variables, options};
root->visit(&visitor);
}
BuiltinInfo getBuiltinInfo(int bfid)
{
switch (LuauBuiltinFunction(bfid))
{
case LBF_NONE:
return {-1, -1};
case LBF_ASSERT:
return {-1, -1}; // assert() returns all values when first value is truthy
case LBF_MATH_ABS:
case LBF_MATH_ACOS:
case LBF_MATH_ASIN:
return {1, 1};
case LBF_MATH_ATAN2:
return {2, 1};
case LBF_MATH_ATAN:
case LBF_MATH_CEIL:
case LBF_MATH_COSH:
case LBF_MATH_COS:
case LBF_MATH_DEG:
case LBF_MATH_EXP:
case LBF_MATH_FLOOR:
return {1, 1};
case LBF_MATH_FMOD:
return {2, 1};
case LBF_MATH_FREXP:
return {1, 2};
case LBF_MATH_LDEXP:
return {2, 1};
case LBF_MATH_LOG10:
return {1, 1};
case LBF_MATH_LOG:
return {-1, 1}; // 1 or 2 parameters
case LBF_MATH_MAX:
case LBF_MATH_MIN:
return {-1, 1}; // variadic
case LBF_MATH_MODF:
return {1, 2};
case LBF_MATH_POW:
return {2, 1};
case LBF_MATH_RAD:
case LBF_MATH_SINH:
case LBF_MATH_SIN:
case LBF_MATH_SQRT:
case LBF_MATH_TANH:
case LBF_MATH_TAN:
return {1, 1};
case LBF_BIT32_ARSHIFT:
return {2, 1};
case LBF_BIT32_BAND:
return {-1, 1}; // variadic
case LBF_BIT32_BNOT:
return {1, 1};
case LBF_BIT32_BOR:
case LBF_BIT32_BXOR:
case LBF_BIT32_BTEST:
return {-1, 1}; // variadic
case LBF_BIT32_EXTRACT:
return {-1, 1}; // 2 or 3 parameters
case LBF_BIT32_LROTATE:
case LBF_BIT32_LSHIFT:
return {2, 1};
case LBF_BIT32_REPLACE:
return {-1, 1}; // 3 or 4 parameters
case LBF_BIT32_RROTATE:
case LBF_BIT32_RSHIFT:
return {2, 1};
case LBF_TYPE:
return {1, 1};
case LBF_STRING_BYTE:
return {-1, -1}; // 1, 2 or 3 parameters
case LBF_STRING_CHAR:
return {-1, 1}; // variadic
case LBF_STRING_LEN:
return {1, 1};
case LBF_TYPEOF:
return {1, 1};
case LBF_STRING_SUB:
return {-1, 1}; // 2 or 3 parameters
case LBF_MATH_CLAMP:
return {3, 1};
case LBF_MATH_SIGN:
case LBF_MATH_ROUND:
return {1, 1};
case LBF_RAWSET:
return {3, 1};
case LBF_RAWGET:
case LBF_RAWEQUAL:
return {2, 1};
case LBF_TABLE_INSERT:
return {-1, 0}; // 2 or 3 parameters
case LBF_TABLE_UNPACK:
return {-1, -1}; // 1, 2 or 3 parameters
case LBF_VECTOR:
return {-1, 1}; // 3 or 4 parameters in some configurations
case LBF_BIT32_COUNTLZ:
case LBF_BIT32_COUNTRZ:
return {1, 1};
case LBF_SELECT_VARARG:
return {-1, -1}; // variadic
case LBF_RAWLEN:
return {1, 1};
case LBF_BIT32_EXTRACTK:
return {3, 1};
case LBF_GETMETATABLE:
return {1, 1};
case LBF_SETMETATABLE:
return {2, 1};
};
LUAU_UNREACHABLE();
}
} // namespace Compile
} // namespace Luau

View File

@@ -1,51 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "ValueTracking.h"
namespace Luau
{
struct CompileOptions;
}
namespace Luau
{
namespace Compile
{
struct Builtin
{
AstName object;
AstName method;
bool empty() const
{
return object == AstName() && method == AstName();
}
bool isGlobal(const char* name) const
{
return object == AstName() && method == name;
}
bool isMethod(const char* table, const char* name) const
{
return object == table && method == name;
}
};
Builtin getBuiltin(AstExpr* node, const DenseHashMap<AstName, Global>& globals, const DenseHashMap<AstLocal*, Variable>& variables);
void analyzeBuiltins(DenseHashMap<AstExprCall*, int>& result, const DenseHashMap<AstName, Global>& globals,
const DenseHashMap<AstLocal*, Variable>& variables, const CompileOptions& options, AstNode* root);
struct BuiltinInfo
{
int params;
int results;
};
BuiltinInfo getBuiltinInfo(int bfid);
} // namespace Compile
} // namespace Luau

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,447 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "ConstantFolding.h"
#include "BuiltinFolding.h"
#include <vector>
#include <math.h>
namespace Luau
{
namespace Compile
{
static bool constantsEqual(const Constant& la, const Constant& ra)
{
LUAU_ASSERT(la.type != Constant::Type_Unknown && ra.type != Constant::Type_Unknown);
switch (la.type)
{
case Constant::Type_Nil:
return ra.type == Constant::Type_Nil;
case Constant::Type_Boolean:
return ra.type == Constant::Type_Boolean && la.valueBoolean == ra.valueBoolean;
case Constant::Type_Number:
return ra.type == Constant::Type_Number && la.valueNumber == ra.valueNumber;
case Constant::Type_String:
return ra.type == Constant::Type_String && la.stringLength == ra.stringLength && memcmp(la.valueString, ra.valueString, la.stringLength) == 0;
default:
LUAU_ASSERT(!"Unexpected constant type in comparison");
return false;
}
}
static void foldUnary(Constant& result, AstExprUnary::Op op, const Constant& arg)
{
switch (op)
{
case AstExprUnary::Not:
if (arg.type != Constant::Type_Unknown)
{
result.type = Constant::Type_Boolean;
result.valueBoolean = !arg.isTruthful();
}
break;
case AstExprUnary::Minus:
if (arg.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = -arg.valueNumber;
}
break;
case AstExprUnary::Len:
if (arg.type == Constant::Type_String)
{
result.type = Constant::Type_Number;
result.valueNumber = double(arg.stringLength);
}
break;
default:
LUAU_ASSERT(!"Unexpected unary operation");
}
}
static void foldBinary(Constant& result, AstExprBinary::Op op, const Constant& la, const Constant& ra)
{
switch (op)
{
case AstExprBinary::Add:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = la.valueNumber + ra.valueNumber;
}
break;
case AstExprBinary::Sub:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = la.valueNumber - ra.valueNumber;
}
break;
case AstExprBinary::Mul:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = la.valueNumber * ra.valueNumber;
}
break;
case AstExprBinary::Div:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = la.valueNumber / ra.valueNumber;
}
break;
case AstExprBinary::Mod:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = la.valueNumber - floor(la.valueNumber / ra.valueNumber) * ra.valueNumber;
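// floored modulo per Lua semantics, e.g. -5 % 3 folds to 1 (fmod would give -2)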
}
break;
case AstExprBinary::Pow:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Number;
result.valueNumber = pow(la.valueNumber, ra.valueNumber);
}
break;
case AstExprBinary::Concat:
break;
case AstExprBinary::CompareNe:
if (la.type != Constant::Type_Unknown && ra.type != Constant::Type_Unknown)
{
result.type = Constant::Type_Boolean;
result.valueBoolean = !constantsEqual(la, ra);
}
break;
case AstExprBinary::CompareEq:
if (la.type != Constant::Type_Unknown && ra.type != Constant::Type_Unknown)
{
result.type = Constant::Type_Boolean;
result.valueBoolean = constantsEqual(la, ra);
}
break;
case AstExprBinary::CompareLt:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Boolean;
result.valueBoolean = la.valueNumber < ra.valueNumber;
}
break;
case AstExprBinary::CompareLe:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Boolean;
result.valueBoolean = la.valueNumber <= ra.valueNumber;
}
break;
case AstExprBinary::CompareGt:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Boolean;
result.valueBoolean = la.valueNumber > ra.valueNumber;
}
break;
case AstExprBinary::CompareGe:
if (la.type == Constant::Type_Number && ra.type == Constant::Type_Number)
{
result.type = Constant::Type_Boolean;
result.valueBoolean = la.valueNumber >= ra.valueNumber;
}
break;
case AstExprBinary::And:
if (la.type != Constant::Type_Unknown)
{
result = la.isTruthful() ? ra : la;
}
break;
case AstExprBinary::Or:
if (la.type != Constant::Type_Unknown)
{
result = la.isTruthful() ? la : ra;
}
break;
default:
LUAU_ASSERT(!"Unexpected binary operation");
}
}
struct ConstantVisitor : AstVisitor
{
DenseHashMap<AstExpr*, Constant>& constants;
DenseHashMap<AstLocal*, Variable>& variables;
DenseHashMap<AstLocal*, Constant>& locals;
const DenseHashMap<AstExprCall*, int>* builtins;
bool wasEmpty = false;
std::vector<Constant> builtinArgs;
ConstantVisitor(DenseHashMap<AstExpr*, Constant>& constants, DenseHashMap<AstLocal*, Variable>& variables,
DenseHashMap<AstLocal*, Constant>& locals, const DenseHashMap<AstExprCall*, int>* builtins)
: constants(constants)
, variables(variables)
, locals(locals)
, builtins(builtins)
{
// since we do a single pass over the tree, if the initial state was empty we don't need to clear out old entries
wasEmpty = constants.empty() && locals.empty();
}
Constant analyze(AstExpr* node)
{
Constant result;
result.type = Constant::Type_Unknown;
if (AstExprGroup* expr = node->as<AstExprGroup>())
{
result = analyze(expr->expr);
}
else if (node->is<AstExprConstantNil>())
{
result.type = Constant::Type_Nil;
}
else if (AstExprConstantBool* expr = node->as<AstExprConstantBool>())
{
result.type = Constant::Type_Boolean;
result.valueBoolean = expr->value;
}
else if (AstExprConstantNumber* expr = node->as<AstExprConstantNumber>())
{
result.type = Constant::Type_Number;
result.valueNumber = expr->value;
}
else if (AstExprConstantString* expr = node->as<AstExprConstantString>())
{
result.type = Constant::Type_String;
result.valueString = expr->value.data;
result.stringLength = unsigned(expr->value.size);
}
else if (AstExprLocal* expr = node->as<AstExprLocal>())
{
const Constant* l = locals.find(expr->local);
if (l)
result = *l;
}
else if (node->is<AstExprGlobal>())
{
// nope
}
else if (node->is<AstExprVarargs>())
{
// nope
}
else if (AstExprCall* expr = node->as<AstExprCall>())
{
analyze(expr->func);
if (const int* bfid = builtins ? builtins->find(expr) : nullptr)
{
// since recursive calls to analyze() may reuse the vector we need to be careful and preserve existing contents
size_t offset = builtinArgs.size();
bool canFold = true;
builtinArgs.reserve(offset + expr->args.size);
for (size_t i = 0; i < expr->args.size; ++i)
{
Constant ac = analyze(expr->args.data[i]);
if (ac.type == Constant::Type_Unknown)
canFold = false;
else
builtinArgs.push_back(ac);
}
if (canFold)
{
LUAU_ASSERT(builtinArgs.size() == offset + expr->args.size);
result = foldBuiltin(*bfid, builtinArgs.data() + offset, expr->args.size);
}
builtinArgs.resize(offset);
}
else
{
for (size_t i = 0; i < expr->args.size; ++i)
analyze(expr->args.data[i]);
}
}
else if (AstExprIndexName* expr = node->as<AstExprIndexName>())
{
analyze(expr->expr);
}
else if (AstExprIndexExpr* expr = node->as<AstExprIndexExpr>())
{
analyze(expr->expr);
analyze(expr->index);
}
else if (AstExprFunction* expr = node->as<AstExprFunction>())
{
// this is necessary to propagate constant information in all child functions
expr->body->visit(this);
}
else if (AstExprTable* expr = node->as<AstExprTable>())
{
for (size_t i = 0; i < expr->items.size; ++i)
{
const AstExprTable::Item& item = expr->items.data[i];
if (item.key)
analyze(item.key);
analyze(item.value);
}
}
else if (AstExprUnary* expr = node->as<AstExprUnary>())
{
Constant arg = analyze(expr->expr);
if (arg.type != Constant::Type_Unknown)
foldUnary(result, expr->op, arg);
}
else if (AstExprBinary* expr = node->as<AstExprBinary>())
{
Constant la = analyze(expr->left);
Constant ra = analyze(expr->right);
// note: ra doesn't need to be constant to fold and/or
if (la.type != Constant::Type_Unknown)
foldBinary(result, expr->op, la, ra);
}
else if (AstExprTypeAssertion* expr = node->as<AstExprTypeAssertion>())
{
Constant arg = analyze(expr->expr);
result = arg;
}
else if (AstExprIfElse* expr = node->as<AstExprIfElse>())
{
Constant cond = analyze(expr->condition);
Constant trueExpr = analyze(expr->trueExpr);
Constant falseExpr = analyze(expr->falseExpr);
if (cond.type != Constant::Type_Unknown)
result = cond.isTruthful() ? trueExpr : falseExpr;
}
else if (AstExprInterpString* expr = node->as<AstExprInterpString>())
{
for (AstExpr* expression : expr->expressions)
analyze(expression);
}
else
{
LUAU_ASSERT(!"Unknown expression type");
}
recordConstant(constants, node, result);
return result;
}
template<typename T>
void recordConstant(DenseHashMap<T, Constant>& map, T key, const Constant& value)
{
if (value.type != Constant::Type_Unknown)
map[key] = value;
else if (wasEmpty)
; // this key was never recorded, so there is nothing to invalidate
else if (Constant* old = map.find(key))
old->type = Constant::Type_Unknown;
}
void recordValue(AstLocal* local, const Constant& value)
{
// note: we rely on trackValues to have been run before us
Variable* v = variables.find(local);
LUAU_ASSERT(v);
if (!v->written)
{
v->constant = (value.type != Constant::Type_Unknown);
recordConstant(locals, local, value);
}
}
bool visit(AstExpr* node) override
{
// note: we short-circuit the visitor traversal through any expression trees by returning false
// recursive traversal is happening inside analyze() which makes it easier to get the resulting value of the subexpression
analyze(node);
return false;
}
bool visit(AstStatLocal* node) override
{
// all values that align with respect to indexing are simple: we just match them 1-1
for (size_t i = 0; i < node->vars.size && i < node->values.size; ++i)
{
Constant arg = analyze(node->values.data[i]);
recordValue(node->vars.data[i], arg);
}
if (node->vars.size > node->values.size)
{
// if we have trailing variables, then depending on whether the last value is capable of returning multiple values
// (aka call or varargs), we either don't know anything about these vars, or we know they're nil
AstExpr* last = node->values.size ? node->values.data[node->values.size - 1] : nullptr;
bool multRet = last && (last->is<AstExprCall>() || last->is<AstExprVarargs>());
if (!multRet)
{
for (size_t i = node->values.size; i < node->vars.size; ++i)
{
Constant nil = {Constant::Type_Nil};
recordValue(node->vars.data[i], nil);
}
}
}
else
{
// we can have more values than variables; in this case we still need to analyze them to make sure we do constant propagation inside
// them
for (size_t i = node->vars.size; i < node->values.size; ++i)
analyze(node->values.data[i]);
}
return false;
}
};
void foldConstants(DenseHashMap<AstExpr*, Constant>& constants, DenseHashMap<AstLocal*, Variable>& variables,
DenseHashMap<AstLocal*, Constant>& locals, const DenseHashMap<AstExprCall*, int>* builtins, AstNode* root)
{
ConstantVisitor visitor{constants, variables, locals, builtins};
root->visit(&visitor);
}
} // namespace Compile
} // namespace Luau

View File

@@ -1,49 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "ValueTracking.h"
namespace Luau
{
namespace Compile
{
struct Constant
{
enum Type
{
Type_Unknown,
Type_Nil,
Type_Boolean,
Type_Number,
Type_String,
};
Type type = Type_Unknown;
unsigned int stringLength = 0;
union
{
bool valueBoolean;
double valueNumber;
const char* valueString = nullptr; // length stored in stringLength
};
bool isTruthful() const
{
LUAU_ASSERT(type != Type_Unknown);
return type != Type_Nil && !(type == Type_Boolean && valueBoolean == false);
}
AstArray<const char> getString() const
{
LUAU_ASSERT(type == Type_String);
return {valueString, stringLength};
}
};
void foldConstants(DenseHashMap<AstExpr*, Constant>& constants, DenseHashMap<AstLocal*, Variable>& variables,
DenseHashMap<AstLocal*, Constant>& locals, const DenseHashMap<AstExprCall*, int>* builtins, AstNode* root);
} // namespace Compile
} // namespace Luau

View File

@@ -1,392 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CostModel.h"
#include "Luau/Common.h"
#include "Luau/DenseHash.h"
#include <limits.h>
namespace Luau
{
namespace Compile
{
inline uint64_t parallelAddSat(uint64_t x, uint64_t y)
{
uint64_t r = x + y;
uint64_t s = r & 0x8080808080808080ull; // saturation mask
return (r ^ s) | (s - (s >> 7));
}
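// Each byte lane carries a 7-bit cost, with bit 7 reserved to flag saturation.
// e.g. for the lane sum 0x70 + 0x20: r == 0x90, s == 0x80, so (r ^ s) == 0x10,
// (s - (s >> 7)) == 0x7f, and the lane saturates to 0x10 | 0x7f == 0x7f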
static uint64_t parallelMulSat(uint64_t a, int b)
{
int bs = (b < 127) ? b : 127;
// multiply every other 7-bit lane by bs (b clamped to 127), yielding 14-bit products
uint64_t l = bs * ((a >> 0) & 0x007f007f007f007full);
uint64_t h = bs * ((a >> 8) & 0x007f007f007f007full);
// each product is 14-bit, so adding 32768-128 sets high bit iff the sum is 128 or larger without an overflow
uint64_t ls = l + 0x7f807f807f807f80ull;
uint64_t hs = h + 0x7f807f807f807f80ull;
// we now merge saturation bits as well as low 7-bits of each product into one
uint64_t s = (hs & 0x8000800080008000ull) | ((ls & 0x8000800080008000ull) >> 8);
uint64_t r = ((h & 0x007f007f007f007full) << 8) | (l & 0x007f007f007f007full);
// the low bits are now correct for values that didn't saturate, and we simply need to mask them if high bit is 1
return r | (s - (s >> 7));
}
inline bool getNumber(AstExpr* node, double& result)
{
// since the cost model doesn't use constant folding at the moment, we perform the basic extraction that's sufficient to handle positive/negative literals
if (AstExprConstantNumber* ne = node->as<AstExprConstantNumber>())
{
result = ne->value;
return true;
}
if (AstExprUnary* ue = node->as<AstExprUnary>(); ue && ue->op == AstExprUnary::Minus)
if (AstExprConstantNumber* ne = ue->expr->as<AstExprConstantNumber>())
{
result = -ne->value;
return true;
}
return false;
}
struct Cost
{
static const uint64_t kLiteral = ~0ull;
// cost model: 8 bytes, where the first byte is the baseline cost, and the next 7 bytes are discounts for when variable #i is constant
uint64_t model;
// constant mask: 8-byte 0xff mask; equal to all ff's for literals; for variables only byte #i (starting at 1) is set, to align with model
uint64_t constant;
Cost(int cost = 0, uint64_t constant = 0)
: model(cost < 0x7f ? cost : 0x7f)
, constant(constant)
{
}
Cost operator+(const Cost& other) const
{
Cost result;
result.model = parallelAddSat(model, other.model);
return result;
}
Cost& operator+=(const Cost& other)
{
model = parallelAddSat(model, other.model);
constant = 0;
return *this;
}
Cost operator*(int other) const
{
Cost result;
result.model = parallelMulSat(model, other);
return result;
}
static Cost fold(const Cost& x, const Cost& y)
{
uint64_t newmodel = parallelAddSat(x.model, y.model);
uint64_t newconstant = x.constant & y.constant;
// the extra cost for folding is 1; the discount is 1 for the variable that is shared by x&y (or whichever one is used in x/y if the other is
// literal)
uint64_t extra = (newconstant == kLiteral) ? 0 : (1 | (0x0101010101010101ull & newconstant));
Cost result;
result.model = parallelAddSat(newmodel, extra);
result.constant = newconstant;
return result;
}
};
struct CostVisitor : AstVisitor
{
const DenseHashMap<AstExprCall*, int>& builtins;
DenseHashMap<AstLocal*, uint64_t> vars;
Cost result;
CostVisitor(const DenseHashMap<AstExprCall*, int>& builtins)
: builtins(builtins)
, vars(nullptr)
{
}
Cost model(AstExpr* node)
{
if (AstExprGroup* expr = node->as<AstExprGroup>())
{
return model(expr->expr);
}
else if (node->is<AstExprConstantNil>() || node->is<AstExprConstantBool>() || node->is<AstExprConstantNumber>() ||
node->is<AstExprConstantString>())
{
return Cost(0, Cost::kLiteral);
}
else if (AstExprLocal* expr = node->as<AstExprLocal>())
{
const uint64_t* i = vars.find(expr->local);
return Cost(0, i ? *i : 0); // locals typically don't require extra instructions to compute
}
else if (node->is<AstExprGlobal>())
{
return 1;
}
else if (node->is<AstExprVarargs>())
{
return 3;
}
else if (AstExprCall* expr = node->as<AstExprCall>())
{
// builtin cost modeling is different from regular calls because we use FASTCALL to compile these
// thus we use a cheaper baseline, don't account for function, and assume constant/local copy is free
bool builtin = builtins.find(expr) != nullptr;
bool builtinShort = builtin && expr->args.size <= 2; // FASTCALL1/2
Cost cost = builtin ? 2 : 3;
if (!builtin)
cost += model(expr->func);
for (size_t i = 0; i < expr->args.size; ++i)
{
Cost ac = model(expr->args.data[i]);
// for constants/locals we still need to copy them to the argument list
cost += ac.model == 0 && !builtinShort ? Cost(1) : ac;
}
return cost;
}
else if (AstExprIndexName* expr = node->as<AstExprIndexName>())
{
return model(expr->expr) + 1;
}
else if (AstExprIndexExpr* expr = node->as<AstExprIndexExpr>())
{
return model(expr->expr) + model(expr->index) + 1;
}
else if (AstExprFunction* expr = node->as<AstExprFunction>())
{
return 10; // high baseline cost due to allocation
}
else if (AstExprTable* expr = node->as<AstExprTable>())
{
Cost cost = 10; // high baseline cost due to allocation
for (size_t i = 0; i < expr->items.size; ++i)
{
const AstExprTable::Item& item = expr->items.data[i];
if (item.key)
cost += model(item.key);
cost += model(item.value);
cost += 1;
}
return cost;
}
else if (AstExprUnary* expr = node->as<AstExprUnary>())
{
return Cost::fold(model(expr->expr), Cost(0, Cost::kLiteral));
}
else if (AstExprBinary* expr = node->as<AstExprBinary>())
{
return Cost::fold(model(expr->left), model(expr->right));
}
else if (AstExprTypeAssertion* expr = node->as<AstExprTypeAssertion>())
{
return model(expr->expr);
}
else if (AstExprIfElse* expr = node->as<AstExprIfElse>())
{
return model(expr->condition) + model(expr->trueExpr) + model(expr->falseExpr) + 2;
}
else if (AstExprInterpString* expr = node->as<AstExprInterpString>())
{
// Baseline cost of string.format
Cost cost = 3;
for (AstExpr* innerExpression : expr->expressions)
cost += model(innerExpression);
return cost;
}
else
{
LUAU_ASSERT(!"Unknown expression type");
return {};
}
}
void assign(AstExpr* expr)
{
// variable assignments reset variable mask, so that further uses of this variable aren't discounted
// this doesn't work perfectly with backwards control flow like loops, but is good enough for a single pass
if (AstExprLocal* lv = expr->as<AstExprLocal>())
if (uint64_t* i = vars.find(lv->local))
*i = 0;
}
void loop(AstStatBlock* body, Cost iterCost, int factor = 3)
{
Cost before = result;
result = Cost();
body->visit(this);
result = before + (result + iterCost) * factor;
}
bool visit(AstExpr* node) override
{
// note: we short-circuit the visitor traversal through any expression trees by returning false
// recursive traversal is happening inside model() which makes it easier to get the resulting value of the subexpression
result += model(node);
return false;
}
bool visit(AstStatFor* node) override
{
result += model(node->from);
result += model(node->to);
if (node->step)
result += model(node->step);
int tripCount = -1;
double from, to, step = 1;
if (getNumber(node->from, from) && getNumber(node->to, to) && (!node->step || getNumber(node->step, step)))
tripCount = getTripCount(from, to, step);
loop(node->body, 1, tripCount < 0 ? 3 : tripCount);
return false;
}
bool visit(AstStatForIn* node) override
{
for (size_t i = 0; i < node->values.size; ++i)
result += model(node->values.data[i]);
loop(node->body, 1);
return false;
}
bool visit(AstStatWhile* node) override
{
Cost condition = model(node->condition);
loop(node->body, condition);
return false;
}
bool visit(AstStatRepeat* node) override
{
Cost condition = model(node->condition);
loop(node->body, condition);
return false;
}
bool visit(AstStat* node) override
{
if (node->is<AstStatIf>())
result += 2;
else if (node->is<AstStatBreak>() || node->is<AstStatContinue>())
result += 1;
return true;
}
bool visit(AstStatLocal* node) override
{
for (size_t i = 0; i < node->values.size; ++i)
{
Cost arg = model(node->values.data[i]);
// propagate constant mask from expression through variables
if (arg.constant && i < node->vars.size)
vars[node->vars.data[i]] = arg.constant;
result += arg;
}
return false;
}
bool visit(AstStatAssign* node) override
{
for (size_t i = 0; i < node->vars.size; ++i)
assign(node->vars.data[i]);
return true;
}
bool visit(AstStatCompoundAssign* node) override
{
assign(node->var);
// if lhs is not a local, setting it requires an extra table operation
result += node->var->is<AstExprLocal>() ? 1 : 2;
return true;
}
};
uint64_t modelCost(AstNode* root, AstLocal* const* vars, size_t varCount, const DenseHashMap<AstExprCall*, int>& builtins)
{
CostVisitor visitor{builtins};
for (size_t i = 0; i < varCount && i < 7; ++i)
visitor.vars[vars[i]] = 0xffull << (i * 8 + 8);
root->visit(&visitor);
return visitor.result.model;
}
int computeCost(uint64_t model, const bool* varsConst, size_t varCount)
{
int cost = int(model & 0x7f);
// don't apply discounts to what is likely a saturated sum
if (cost == 0x7f)
return cost;
for (size_t i = 0; i < varCount && i < 7; ++i)
cost -= int((model >> (i * 8 + 8)) & 0x7f) * varsConst[i];
return cost;
}
int getTripCount(double from, double to, double step)
{
// we compute trip count in integers because that way we know that the loop math (repeated addition) is precise
int fromi = (from >= -32767 && from <= 32767 && double(int(from)) == from) ? int(from) : INT_MIN;
int toi = (to >= -32767 && to <= 32767 && double(int(to)) == to) ? int(to) : INT_MIN;
int stepi = (step >= -32767 && step <= 32767 && double(int(step)) == step) ? int(step) : INT_MIN;
if (fromi == INT_MIN || toi == INT_MIN || stepi == INT_MIN || stepi == 0)
return -1;
if ((stepi < 0 && toi > fromi) || (stepi > 0 && toi < fromi))
return 0;
return (toi - fromi) / stepi + 1;
}
} // namespace Compile
} // namespace Luau
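// Hedged usage sketch of getTripCount above (values chosen for illustration):
int exact = Luau::Compile::getTripCount(1, 10, 1);    // 10: (10 - 1) / 1 + 1
int empty = Luau::Compile::getTripCount(10, 1, 1);    // 0: positive step, but to < from
int unknown = Luau::Compile::getTripCount(1, 1e9, 1); // -1: bound outside +/-32767, not computable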

View File

@ -1,22 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Ast.h"
#include "Luau/DenseHash.h"
namespace Luau
{
namespace Compile
{
// cost model: 8 bytes, where first byte is the baseline cost, and the next 7 bytes are discounts for when variable #i is constant
uint64_t modelCost(AstNode* root, AstLocal* const* vars, size_t varCount, const DenseHashMap<AstExprCall*, int>& builtins);
// cost is computed as B - sum(Di * Ci), where B is baseline cost, Di is the discount for each variable and Ci is 1 when variable #i is constant
int computeCost(uint64_t model, const bool* varsConst, size_t varCount);
// get loop trip count or -1 if we can't compute it precisely
int getTripCount(double from, double to, double step);
} // namespace Compile
} // namespace Luau
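// Hedged worked example of the 8-byte model described above (hypothetical values):
// the low byte holds the baseline B, byte #(i+1) holds the discount Di for variable #i.
uint64_t model = 5ull | (2ull << 8);                        // B = 5, D0 = 2
bool varsConst[1] = {true};                                 // C0 = 1: variable #0 is constant
int cost = Luau::Compile::computeCost(model, varsConst, 1); // B - D0 * C0 == 5 - 2 == 3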

View File

@ -1,158 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "TableShape.h"
namespace Luau
{
namespace Compile
{
// conservative limit for the loop bound that establishes table array size
static const int kMaxLoopBound = 16;
static AstExprTable* getTableHint(AstExpr* expr)
{
// unadorned table literal
if (AstExprTable* table = expr->as<AstExprTable>())
return table;
// setmetatable(table literal, ...)
if (AstExprCall* call = expr->as<AstExprCall>(); call && !call->self && call->args.size == 2)
if (AstExprGlobal* func = call->func->as<AstExprGlobal>(); func && func->name == "setmetatable")
if (AstExprTable* table = call->args.data[0]->as<AstExprTable>())
return table;
return nullptr;
}
struct ShapeVisitor : AstVisitor
{
struct Hasher
{
size_t operator()(const std::pair<AstExprTable*, AstName>& p) const
{
return DenseHashPointer()(p.first) ^ std::hash<AstName>()(p.second);
}
};
DenseHashMap<AstExprTable*, TableShape>& shapes;
DenseHashMap<AstLocal*, AstExprTable*> tables;
DenseHashSet<std::pair<AstExprTable*, AstName>, Hasher> fields;
DenseHashMap<AstLocal*, unsigned int> loops; // iterator => upper bound for 1..k
ShapeVisitor(DenseHashMap<AstExprTable*, TableShape>& shapes)
: shapes(shapes)
, tables(nullptr)
, fields(std::pair<AstExprTable*, AstName>())
, loops(nullptr)
{
}
void assignField(AstExpr* expr, AstName index)
{
if (AstExprLocal* lv = expr->as<AstExprLocal>())
{
if (AstExprTable** table = tables.find(lv->local))
{
std::pair<AstExprTable*, AstName> field = {*table, index};
if (!fields.contains(field))
{
fields.insert(field);
shapes[*table].hashSize += 1;
}
}
}
}
void assignField(AstExpr* expr, AstExpr* index)
{
AstExprLocal* lv = expr->as<AstExprLocal>();
if (!lv)
return;
AstExprTable** table = tables.find(lv->local);
if (!table)
return;
if (AstExprConstantNumber* number = index->as<AstExprConstantNumber>())
{
TableShape& shape = shapes[*table];
if (number->value == double(shape.arraySize + 1))
shape.arraySize += 1;
}
else if (AstExprLocal* iter = index->as<AstExprLocal>())
{
if (const unsigned int* bound = loops.find(iter->local))
{
TableShape& shape = shapes[*table];
if (shape.arraySize == 0)
shape.arraySize = *bound;
}
}
}
void assign(AstExpr* var)
{
if (AstExprIndexName* index = var->as<AstExprIndexName>())
{
assignField(index->expr, index->index);
}
else if (AstExprIndexExpr* index = var->as<AstExprIndexExpr>())
{
assignField(index->expr, index->index);
}
}
bool visit(AstStatLocal* node) override
{
// track local -> table association so that we can update table size prediction in assignField
if (node->vars.size == 1 && node->values.size == 1)
if (AstExprTable* table = getTableHint(node->values.data[0]); table && table->items.size == 0)
tables[node->vars.data[0]] = table;
return true;
}
bool visit(AstStatAssign* node) override
{
for (size_t i = 0; i < node->vars.size; ++i)
assign(node->vars.data[i]);
for (size_t i = 0; i < node->values.size; ++i)
node->values.data[i]->visit(this);
return false;
}
bool visit(AstStatFunction* node) override
{
assign(node->name);
node->func->visit(this);
return false;
}
bool visit(AstStatFor* node) override
{
AstExprConstantNumber* from = node->from->as<AstExprConstantNumber>();
AstExprConstantNumber* to = node->to->as<AstExprConstantNumber>();
if (from && to && from->value == 1.0 && to->value >= 1.0 && to->value <= double(kMaxLoopBound) && !node->step)
loops[node->var] = unsigned(to->value);
return true;
}
};
void predictTableShapes(DenseHashMap<AstExprTable*, TableShape>& shapes, AstNode* root)
{
ShapeVisitor visitor{shapes};
root->visit(&visitor);
}
} // namespace Compile
} // namespace Luau

View File

@ -1,21 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Ast.h"
#include "Luau/DenseHash.h"
namespace Luau
{
namespace Compile
{
struct TableShape
{
unsigned int arraySize = 0;
unsigned int hashSize = 0;
};
void predictTableShapes(DenseHashMap<AstExprTable*, TableShape>& shapes, AstNode* root);
} // namespace Compile
} // namespace Luau
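// Hedged usage sketch ('root' is assumed to be an AstNode* produced by Luau's parser):
Luau::DenseHashMap<Luau::AstExprTable*, Luau::Compile::TableShape> shapes(nullptr);
Luau::Compile::predictTableShapes(shapes, root);
// for 'local t = {} for i = 1, 4 do t[i] = i end t.x = 1' the visitor above would
// predict arraySize = 4 (loop bound within kMaxLoopBound) and hashSize = 1 (field 'x')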

View File

@ -1,106 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/BytecodeBuilder.h"
#include "Types.h"
namespace Luau
{
static LuauBytecodeEncodedType getType(AstType* ty)
{
if (AstTypeReference* ref = ty->as<AstTypeReference>())
{
if (ref->name == "nil")
return LBC_TYPE_NIL;
else if (ref->name == "boolean")
return LBC_TYPE_BOOLEAN;
else if (ref->name == "number")
return LBC_TYPE_NUMBER;
else if (ref->name == "string")
return LBC_TYPE_STRING;
else if (ref->name == "thread")
return LBC_TYPE_THREAD;
else if (ref->name == "any" || ref->name == "unknown")
return LBC_TYPE_ANY;
}
else if (AstTypeTable* table = ty->as<AstTypeTable>())
{
return LBC_TYPE_TABLE;
}
else if (AstTypeFunction* func = ty->as<AstTypeFunction>())
{
return LBC_TYPE_FUNCTION;
}
else if (AstTypeUnion* un = ty->as<AstTypeUnion>())
{
bool optional = false;
LuauBytecodeEncodedType type = LBC_TYPE_INVALID;
for (AstType* ty : un->types)
{
LuauBytecodeEncodedType et = getType(ty);
if (et == LBC_TYPE_NIL)
{
optional = true;
continue;
}
if (type == LBC_TYPE_INVALID)
{
type = et;
continue;
}
if (type != et)
return LBC_TYPE_ANY;
}
if (type == LBC_TYPE_INVALID)
return LBC_TYPE_ANY;
return LuauBytecodeEncodedType(type | (optional && (type != LBC_TYPE_ANY) ? LBC_TYPE_OPTIONAL_BIT : 0));
}
else if (AstTypeIntersection* inter = ty->as<AstTypeIntersection>())
{
return LBC_TYPE_ANY;
}
return LBC_TYPE_ANY;
}
std::string getFunctionType(const AstExprFunction* func)
{
if (func->vararg || func->generics.size || func->genericPacks.size)
return {};
bool self = func->self != 0;
std::string typeInfo;
typeInfo.reserve(func->args.size + self + 2);
typeInfo.push_back(LBC_TYPE_FUNCTION);
typeInfo.push_back(uint8_t(self + func->args.size));
if (self)
typeInfo.push_back(LBC_TYPE_TABLE);
bool haveNonAnyParam = false;
for (AstLocal* arg : func->args)
{
LuauBytecodeEncodedType ty = arg->annotation ? getType(arg->annotation) : LBC_TYPE_ANY;
if (ty != LBC_TYPE_ANY)
haveNonAnyParam = true;
typeInfo.push_back(ty);
}
// If all parameters simplify to any, we can just omit type info for this function
if (!haveNonAnyParam)
return {};
return typeInfo;
}
} // namespace Luau
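// Hedged illustration ('func' is an assumed AstExprFunction* parsed from
// 'function(x: number, y) end'; the bytes follow the logic above):
std::string info = Luau::getFunctionType(func);
// info holds { LBC_TYPE_FUNCTION, 2, LBC_TYPE_NUMBER, LBC_TYPE_ANY };
// for 'function(x, y) end' the result is empty, since every parameter is 'any'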

View File

@ -1,9 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Ast.h"
namespace Luau
{
std::string getFunctionType(const AstExprFunction* func);
} // namespace Luau

View File

@ -1,103 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "ValueTracking.h"
#include "Luau/Lexer.h"
namespace Luau
{
namespace Compile
{
struct ValueVisitor : AstVisitor
{
DenseHashMap<AstName, Global>& globals;
DenseHashMap<AstLocal*, Variable>& variables;
ValueVisitor(DenseHashMap<AstName, Global>& globals, DenseHashMap<AstLocal*, Variable>& variables)
: globals(globals)
, variables(variables)
{
}
void assign(AstExpr* var)
{
if (AstExprLocal* lv = var->as<AstExprLocal>())
{
variables[lv->local].written = true;
}
else if (AstExprGlobal* gv = var->as<AstExprGlobal>())
{
globals[gv->name] = Global::Written;
}
else
{
// we need to be able to track assignments in all expressions, including crazy ones like t[function() t = nil end] = 5
var->visit(this);
}
}
bool visit(AstStatLocal* node) override
{
for (size_t i = 0; i < node->vars.size && i < node->values.size; ++i)
variables[node->vars.data[i]].init = node->values.data[i];
for (size_t i = node->values.size; i < node->vars.size; ++i)
variables[node->vars.data[i]].init = nullptr;
return true;
}
bool visit(AstStatAssign* node) override
{
for (size_t i = 0; i < node->vars.size; ++i)
assign(node->vars.data[i]);
for (size_t i = 0; i < node->values.size; ++i)
node->values.data[i]->visit(this);
return false;
}
bool visit(AstStatCompoundAssign* node) override
{
assign(node->var);
node->value->visit(this);
return false;
}
bool visit(AstStatLocalFunction* node) override
{
variables[node->name].init = node->func;
return true;
}
bool visit(AstStatFunction* node) override
{
assign(node->name);
node->func->visit(this);
return false;
}
};
void assignMutable(DenseHashMap<AstName, Global>& globals, const AstNameTable& names, const char** mutableGlobals)
{
if (AstName name = names.get("_G"); name.value)
globals[name] = Global::Mutable;
if (mutableGlobals)
for (const char** ptr = mutableGlobals; *ptr; ++ptr)
if (AstName name = names.get(*ptr); name.value)
globals[name] = Global::Mutable;
}
void trackValues(DenseHashMap<AstName, Global>& globals, DenseHashMap<AstLocal*, Variable>& variables, AstNode* root)
{
ValueVisitor visitor{globals, variables};
root->visit(&visitor);
}
} // namespace Compile
} // namespace Luau

View File

@ -1,42 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Ast.h"
#include "Luau/DenseHash.h"
namespace Luau
{
class AstNameTable;
}
namespace Luau
{
namespace Compile
{
enum class Global
{
Default = 0,
Mutable, // builtin that has contents unknown at compile time, blocks GETIMPORT for chains
Written, // written in the code which means we can't reason about the value
};
struct Variable
{
AstExpr* init = nullptr; // initial value of the variable; filled by trackValues
bool written = false; // is the variable ever assigned to? filled by trackValues
bool constant = false; // is the variable's value a compile-time constant? filled by constantFold
};
void assignMutable(DenseHashMap<AstName, Global>& globals, const AstNameTable& names, const char** mutableGlobals);
void trackValues(DenseHashMap<AstName, Global>& globals, DenseHashMap<AstLocal*, Variable>& variables, AstNode* root);
inline Global getGlobalState(const DenseHashMap<AstName, Global>& globals, AstName name)
{
const Global* it = globals.find(name);
return it ? *it : Global::Default;
}
} // namespace Compile
} // namespace Luau

View File

@ -1,29 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "luacode.h"
#include "Luau/Compiler.h"
#include <string.h>
char* luau_compile(const char* source, size_t size, lua_CompileOptions* options, size_t* outsize)
{
LUAU_ASSERT(outsize);
Luau::CompileOptions opts;
if (options)
{
static_assert(sizeof(lua_CompileOptions) == sizeof(Luau::CompileOptions), "C and C++ interface must match");
memcpy(static_cast<void*>(&opts), options, sizeof(opts));
}
std::string result = compile(std::string(source, size), opts);
char* copy = static_cast<char*>(malloc(result.size()));
if (!copy)
return nullptr;
memcpy(copy, result.data(), result.size());
*outsize = result.size();
return copy;
}
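// Hedged usage sketch of the C API above (assumes <stdlib.h> for free()):
size_t bytecodeSize = 0;
char* bytecode = luau_compile("return 1 + 2", 12, /*options=*/nullptr, &bytecodeSize);
if (bytecode)
{
    // hand the buffer to a VM via luau_load(L, "=example", bytecode, bytecodeSize, 0), then:
    free(bytecode); // the buffer comes from malloc above, so plain free() releases it
}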

View File

@ -1,467 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include "luaconf.h"
// option for multiple returns in `lua_pcall' and `lua_call'
#define LUA_MULTRET (-1)
/*
** pseudo-indices
*/
#define LUA_REGISTRYINDEX (-LUAI_MAXCSTACK - 2000)
#define LUA_ENVIRONINDEX (-LUAI_MAXCSTACK - 2001)
#define LUA_GLOBALSINDEX (-LUAI_MAXCSTACK - 2002)
#define lua_upvalueindex(i) (LUA_GLOBALSINDEX - (i))
#define lua_ispseudo(i) ((i) <= LUA_REGISTRYINDEX)
// thread status; 0 is OK
enum lua_Status
{
LUA_OK = 0,
LUA_YIELD,
LUA_ERRRUN,
LUA_ERRSYNTAX, // legacy error code, preserved for compatibility
LUA_ERRMEM,
LUA_ERRERR,
LUA_BREAK, // yielded for a debug breakpoint
};
enum lua_CoStatus
{
LUA_CORUN = 0, // running
LUA_COSUS, // suspended
LUA_CONOR, // 'normal' (it resumed another coroutine)
LUA_COFIN, // finished
LUA_COERR, // finished with error
};
typedef struct lua_State lua_State;
typedef int (*lua_CFunction)(lua_State* L);
typedef int (*lua_Continuation)(lua_State* L, int status);
/*
** prototype for memory-allocation functions
*/
typedef void* (*lua_Alloc)(void* ud, void* ptr, size_t osize, size_t nsize);
// non-return type
#define l_noret void LUA_NORETURN
/*
** basic types
*/
#define LUA_TNONE (-1)
/*
* WARNING: if you change the order of this enumeration,
* grep "ORDER TYPE"
*/
// clang-format off
enum lua_Type
{
LUA_TNIL = 0, // must be 0 due to lua_isnoneornil
LUA_TBOOLEAN = 1, // must be 1 due to l_isfalse
LUA_TLIGHTUSERDATA,
LUA_TNUMBER,
LUA_TVECTOR,
LUA_TSTRING, // all types above this must be value types, all types below this must be GC types - see iscollectable
LUA_TTABLE,
LUA_TFUNCTION,
LUA_TUSERDATA,
LUA_TTHREAD,
// values below this line are used in GCObject tags but may never show up in TValue type tags
LUA_TPROTO,
LUA_TUPVAL,
LUA_TDEADKEY,
// the count of TValue type tags
LUA_T_COUNT = LUA_TPROTO
};
// clang-format on
// type of numbers in Luau
typedef double lua_Number;
// type for integer functions
typedef int lua_Integer;
// unsigned integer type
typedef unsigned lua_Unsigned;
/*
** state manipulation
*/
LUA_API lua_State* lua_newstate(lua_Alloc f, void* ud);
LUA_API void lua_close(lua_State* L);
LUA_API lua_State* lua_newthread(lua_State* L);
LUA_API lua_State* lua_mainthread(lua_State* L);
LUA_API void lua_resetthread(lua_State* L);
LUA_API int lua_isthreadreset(lua_State* L);
/*
** basic stack manipulation
*/
LUA_API int lua_absindex(lua_State* L, int idx);
LUA_API int lua_gettop(lua_State* L);
LUA_API void lua_settop(lua_State* L, int idx);
LUA_API void lua_pushvalue(lua_State* L, int idx);
LUA_API void lua_remove(lua_State* L, int idx);
LUA_API void lua_insert(lua_State* L, int idx);
LUA_API void lua_replace(lua_State* L, int idx);
LUA_API int lua_checkstack(lua_State* L, int sz);
LUA_API void lua_rawcheckstack(lua_State* L, int sz); // allows for unlimited stack frames
LUA_API void lua_xmove(lua_State* from, lua_State* to, int n);
LUA_API void lua_xpush(lua_State* from, lua_State* to, int idx);
/*
** access functions (stack -> C)
*/
LUA_API int lua_isnumber(lua_State* L, int idx);
LUA_API int lua_isstring(lua_State* L, int idx);
LUA_API int lua_iscfunction(lua_State* L, int idx);
LUA_API int lua_isLfunction(lua_State* L, int idx);
LUA_API int lua_isuserdata(lua_State* L, int idx);
LUA_API int lua_type(lua_State* L, int idx);
LUA_API const char* lua_typename(lua_State* L, int tp);
LUA_API int lua_equal(lua_State* L, int idx1, int idx2);
LUA_API int lua_rawequal(lua_State* L, int idx1, int idx2);
LUA_API int lua_lessthan(lua_State* L, int idx1, int idx2);
LUA_API double lua_tonumberx(lua_State* L, int idx, int* isnum);
LUA_API int lua_tointegerx(lua_State* L, int idx, int* isnum);
LUA_API unsigned lua_tounsignedx(lua_State* L, int idx, int* isnum);
LUA_API const float* lua_tovector(lua_State* L, int idx);
LUA_API int lua_toboolean(lua_State* L, int idx);
LUA_API const char* lua_tolstring(lua_State* L, int idx, size_t* len);
LUA_API const char* lua_tostringatom(lua_State* L, int idx, int* atom);
LUA_API const char* lua_namecallatom(lua_State* L, int* atom);
LUA_API int lua_objlen(lua_State* L, int idx);
LUA_API lua_CFunction lua_tocfunction(lua_State* L, int idx);
LUA_API void* lua_tolightuserdata(lua_State* L, int idx);
LUA_API void* lua_touserdata(lua_State* L, int idx);
LUA_API void* lua_touserdatatagged(lua_State* L, int idx, int tag);
LUA_API int lua_userdatatag(lua_State* L, int idx);
LUA_API lua_State* lua_tothread(lua_State* L, int idx);
LUA_API const void* lua_topointer(lua_State* L, int idx);
/*
** push functions (C -> stack)
*/
LUA_API void lua_pushnil(lua_State* L);
LUA_API void lua_pushnumber(lua_State* L, double n);
LUA_API void lua_pushinteger(lua_State* L, int n);
LUA_API void lua_pushunsigned(lua_State* L, unsigned n);
#if LUA_VECTOR_SIZE == 4
LUA_API void lua_pushvector(lua_State* L, float x, float y, float z, float w);
#else
LUA_API void lua_pushvector(lua_State* L, float x, float y, float z);
#endif
LUA_API void lua_pushlstring(lua_State* L, const char* s, size_t l);
LUA_API void lua_pushstring(lua_State* L, const char* s);
LUA_API const char* lua_pushvfstring(lua_State* L, const char* fmt, va_list argp);
LUA_API LUA_PRINTF_ATTR(2, 3) const char* lua_pushfstringL(lua_State* L, const char* fmt, ...);
LUA_API void lua_pushcclosurek(lua_State* L, lua_CFunction fn, const char* debugname, int nup, lua_Continuation cont);
LUA_API void lua_pushboolean(lua_State* L, int b);
LUA_API int lua_pushthread(lua_State* L);
LUA_API void lua_pushlightuserdata(lua_State* L, void* p);
LUA_API void* lua_newuserdatatagged(lua_State* L, size_t sz, int tag);
LUA_API void* lua_newuserdatadtor(lua_State* L, size_t sz, void (*dtor)(void*));
/*
** get functions (Lua -> stack)
*/
LUA_API int lua_gettable(lua_State* L, int idx);
LUA_API int lua_getfield(lua_State* L, int idx, const char* k);
LUA_API int lua_rawgetfield(lua_State* L, int idx, const char* k);
LUA_API int lua_rawget(lua_State* L, int idx);
LUA_API int lua_rawgeti(lua_State* L, int idx, int n);
LUA_API void lua_createtable(lua_State* L, int narr, int nrec);
LUA_API void lua_setreadonly(lua_State* L, int idx, int enabled);
LUA_API int lua_getreadonly(lua_State* L, int idx);
LUA_API void lua_setsafeenv(lua_State* L, int idx, int enabled);
LUA_API int lua_getmetatable(lua_State* L, int objindex);
LUA_API void lua_getfenv(lua_State* L, int idx);
/*
** set functions (stack -> Lua)
*/
LUA_API void lua_settable(lua_State* L, int idx);
LUA_API void lua_setfield(lua_State* L, int idx, const char* k);
LUA_API void lua_rawsetfield(lua_State* L, int idx, const char* k);
LUA_API void lua_rawset(lua_State* L, int idx);
LUA_API void lua_rawseti(lua_State* L, int idx, int n);
LUA_API int lua_setmetatable(lua_State* L, int objindex);
LUA_API int lua_setfenv(lua_State* L, int idx);
/*
** `load' and `call' functions (load and run Luau bytecode)
*/
LUA_API int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size, int env);
LUA_API void lua_call(lua_State* L, int nargs, int nresults);
LUA_API int lua_pcall(lua_State* L, int nargs, int nresults, int errfunc);
/*
** coroutine functions
*/
LUA_API int lua_yield(lua_State* L, int nresults);
LUA_API int lua_break(lua_State* L);
LUA_API int lua_resume(lua_State* L, lua_State* from, int narg);
LUA_API int lua_resumeerror(lua_State* L, lua_State* from);
LUA_API int lua_status(lua_State* L);
LUA_API int lua_isyieldable(lua_State* L);
LUA_API void* lua_getthreaddata(lua_State* L);
LUA_API void lua_setthreaddata(lua_State* L, void* data);
LUA_API int lua_costatus(lua_State* L, lua_State* co);
/*
** garbage-collection function and options
*/
enum lua_GCOp
{
// stop and resume incremental garbage collection
LUA_GCSTOP,
LUA_GCRESTART,
// run a full GC cycle; not recommended for latency sensitive applications
LUA_GCCOLLECT,
// return the heap size in KB and the remainder in bytes
LUA_GCCOUNT,
LUA_GCCOUNTB,
// return 1 if GC is active (not stopped); note that GC may not be actively collecting even if it's running
LUA_GCISRUNNING,
/*
** perform an explicit GC step, with the step size specified in KB
**
** garbage collection is handled by 'assists' that perform some amount of GC work matching pace of allocation
** explicit GC steps make it possible to perform some amount of work at custom points to offset the need for GC assists
** note that GC might also be paused for some duration (until bytes allocated meet the threshold)
** if an explicit step is performed during this pause, it will trigger the start of the next collection cycle
*/
LUA_GCSTEP,
/*
** tune GC parameters G (goal), S (step multiplier) and step size (usually best left ignored)
**
** garbage collection is incremental and tries to maintain the heap size to balance memory and performance overhead
** this overhead is determined by G (goal) which is the ratio between total heap size and the amount of live data in it
** G is specified in percentages; by default G=200% which means that the heap is allowed to grow to ~2x the size of live data.
**
** collector tries to collect S% of allocated bytes by interrupting the application after step size bytes were allocated.
** when S is too small, collector may not be able to catch up and the effective goal that can be reached will be larger.
** S is specified in percentages; by default S=200% which means that collector will run at ~2x the pace of allocations.
**
** it is recommended to set S in the interval [100 / (G - 100), 100 + 100 / (G - 100)] with a minimum value of 150%; for example:
** - for G=200%, S should be in the interval [150%, 200%]
** - for G=150%, S should be in the interval [200%, 300%]
** - for G=125%, S should be in the interval [400%, 500%]
*/
LUA_GCSETGOAL,
LUA_GCSETSTEPMUL,
LUA_GCSETSTEPSIZE,
};
LUA_API int lua_gc(lua_State* L, int what, int data);
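// Hedged sketch following the guidance above: a G=200% goal paired with S=200%
// (assumes 'L' is a valid lua_State*):
lua_gc(L, LUA_GCSETGOAL, 200);
lua_gc(L, LUA_GCSETSTEPMUL, 200);
lua_gc(L, LUA_GCSTEP, 1); // optionally offset assists with an explicit 1 KB step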
/*
** memory statistics
** all allocated bytes are attributed to the memory category of the running thread (0..LUA_MEMORY_CATEGORIES-1)
*/
LUA_API void lua_setmemcat(lua_State* L, int category);
LUA_API size_t lua_totalbytes(lua_State* L, int category);
/*
** miscellaneous functions
*/
LUA_API l_noret lua_error(lua_State* L);
LUA_API int lua_next(lua_State* L, int idx);
LUA_API int lua_rawiter(lua_State* L, int idx, int iter);
LUA_API void lua_concat(lua_State* L, int n);
LUA_API uintptr_t lua_encodepointer(lua_State* L, uintptr_t p);
LUA_API double lua_clock();
LUA_API void lua_setuserdatatag(lua_State* L, int idx, int tag);
typedef void (*lua_Destructor)(lua_State* L, void* userdata);
LUA_API void lua_setuserdatadtor(lua_State* L, int tag, lua_Destructor dtor);
LUA_API lua_Destructor lua_getuserdatadtor(lua_State* L, int tag);
LUA_API void lua_clonefunction(lua_State* L, int idx);
LUA_API void lua_cleartable(lua_State* L, int idx);
/*
** reference system, can be used to pin objects
*/
#define LUA_NOREF -1
#define LUA_REFNIL 0
LUA_API int lua_ref(lua_State* L, int idx);
LUA_API void lua_unref(lua_State* L, int ref);
#define lua_getref(L, ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref))
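// Hedged sketch of pinning a value so the GC keeps it alive across C calls:
int ref = lua_ref(L, -1); // pin the value currently on top of the stack
// ... later ...
lua_getref(L, ref);       // push the pinned value back
lua_unref(L, ref);        // release the pin once it's no longer needed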
/*
** ===============================================================
** some useful macros
** ===============================================================
*/
#define lua_tonumber(L, i) lua_tonumberx(L, i, NULL)
#define lua_tointeger(L, i) lua_tointegerx(L, i, NULL)
#define lua_tounsigned(L, i) lua_tounsignedx(L, i, NULL)
#define lua_pop(L, n) lua_settop(L, -(n)-1)
#define lua_newtable(L) lua_createtable(L, 0, 0)
#define lua_newuserdata(L, s) lua_newuserdatatagged(L, s, 0)
#define lua_strlen(L, i) lua_objlen(L, (i))
#define lua_isfunction(L, n) (lua_type(L, (n)) == LUA_TFUNCTION)
#define lua_istable(L, n) (lua_type(L, (n)) == LUA_TTABLE)
#define lua_islightuserdata(L, n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA)
#define lua_isnil(L, n) (lua_type(L, (n)) == LUA_TNIL)
#define lua_isboolean(L, n) (lua_type(L, (n)) == LUA_TBOOLEAN)
#define lua_isvector(L, n) (lua_type(L, (n)) == LUA_TVECTOR)
#define lua_isthread(L, n) (lua_type(L, (n)) == LUA_TTHREAD)
#define lua_isnone(L, n) (lua_type(L, (n)) == LUA_TNONE)
#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= LUA_TNIL)
#define lua_pushliteral(L, s) lua_pushlstring(L, "" s, (sizeof(s) / sizeof(char)) - 1)
#define lua_pushcfunction(L, fn, debugname) lua_pushcclosurek(L, fn, debugname, 0, NULL)
#define lua_pushcclosure(L, fn, debugname, nup) lua_pushcclosurek(L, fn, debugname, nup, NULL)
#define lua_setglobal(L, s) lua_setfield(L, LUA_GLOBALSINDEX, (s))
#define lua_getglobal(L, s) lua_getfield(L, LUA_GLOBALSINDEX, (s))
#define lua_tostring(L, i) lua_tolstring(L, (i), NULL)
#define lua_pushfstring(L, fmt, ...) lua_pushfstringL(L, fmt, ##__VA_ARGS__)
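// Hedged sketch combining the macros above: a C closure that adds a base value
// stored in upvalue #1 to its first argument (names are illustrative only).
static int addbase(lua_State* L)
{
    double base = lua_tonumber(L, lua_upvalueindex(1)); // read upvalue #1
    lua_pushnumber(L, base + lua_tonumber(L, 1));
    return 1;
}
// setup: lua_pushnumber(L, 5); lua_pushcclosure(L, addbase, "addbase", 1);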
/*
** {======================================================================
** Debug API
** =======================================================================
*/
typedef struct lua_Debug lua_Debug; // activation record
// Functions to be called by the debugger in specific events
typedef void (*lua_Hook)(lua_State* L, lua_Debug* ar);
LUA_API int lua_stackdepth(lua_State* L);
LUA_API int lua_getinfo(lua_State* L, int level, const char* what, lua_Debug* ar);
LUA_API int lua_getargument(lua_State* L, int level, int n);
LUA_API const char* lua_getlocal(lua_State* L, int level, int n);
LUA_API const char* lua_setlocal(lua_State* L, int level, int n);
LUA_API const char* lua_getupvalue(lua_State* L, int funcindex, int n);
LUA_API const char* lua_setupvalue(lua_State* L, int funcindex, int n);
LUA_API void lua_singlestep(lua_State* L, int enabled);
LUA_API int lua_breakpoint(lua_State* L, int funcindex, int line, int enabled);
typedef void (*lua_Coverage)(void* context, const char* function, int linedefined, int depth, const int* hits, size_t size);
LUA_API void lua_getcoverage(lua_State* L, int funcindex, void* context, lua_Coverage callback);
// Warning: this function is not thread-safe since it stores the result in a shared global array! Only use for debugging.
LUA_API const char* lua_debugtrace(lua_State* L);
struct lua_Debug
{
const char* name; // (n)
const char* what; // (s) `Lua', `C', `main', `tail'
const char* source; // (s)
const char* short_src; // (s)
int linedefined; // (s)
int currentline; // (l)
unsigned char nupvals; // (u) number of upvalues
unsigned char nparams; // (a) number of parameters
char isvararg; // (a)
void* userdata; // only valid in luau_callhook
char ssbuf[LUA_IDSIZE];
};
// }======================================================================
/* Callbacks that can be used to reconfigure behavior of the VM dynamically.
* These are shared between all coroutines.
*
* Note: interrupt is safe to set from an arbitrary thread but all other callbacks
* can only be changed when the VM is not running any code */
struct lua_Callbacks
{
void* userdata; // arbitrary userdata pointer that is never overwritten by Luau
void (*interrupt)(lua_State* L, int gc); // gets called at safepoints (loop back edges, call/ret, gc) if set
void (*panic)(lua_State* L, int errcode); // gets called when an unprotected error is raised (if longjmp is used)
void (*userthread)(lua_State* LP, lua_State* L); // gets called when L is created (LP == parent) or destroyed (LP == NULL)
int16_t (*useratom)(const char* s, size_t l); // gets called when a string is created; returned atom can be retrieved via tostringatom
void (*debugbreak)(lua_State* L, lua_Debug* ar); // gets called when BREAK instruction is encountered
void (*debugstep)(lua_State* L, lua_Debug* ar); // gets called after each instruction in single step mode
void (*debuginterrupt)(lua_State* L, lua_Debug* ar); // gets called when thread execution is interrupted by break in another thread
void (*debugprotectederror)(lua_State* L); // gets called when protected call results in an error
};
typedef struct lua_Callbacks lua_Callbacks;
LUA_API lua_Callbacks* lua_callbacks(lua_State* L);
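// Hedged sketch (handler body is illustrative): installing the interrupt hook,
// the one callback documented above as safe to set from another thread.
static void onInterrupt(lua_State* L, int gc)
{
    // a place to check a timeout flag and error out of runaway scripts
}
// installation: lua_callbacks(L)->interrupt = onInterrupt;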
/******************************************************************************
* Copyright (c) 2019-2022 Roblox Corporation
* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/

View File

@ -1,150 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
// When debugging complex issues, consider enabling one of these:
// This will reallocate the stack very aggressively at every opportunity; use this with asan to catch stale stack pointers
// #define HARDSTACKTESTS 1
// This will call GC validation very aggressively at every incremental GC step; use this with caution as it's SLOW
// #define HARDMEMTESTS 1
// This will call GC validation very aggressively at every GC opportunity; use this with caution as it's VERY SLOW
// #define HARDMEMTESTS 2
// To force MSVC2017+ to generate SSE2 code for some stdlib functions we need to locally enable /fp:fast
// Note that /fp:fast changes the semantics of floating point comparisons, so this is only safe for functions that don't perform any
#if defined(_MSC_VER) && !defined(__clang__)
#define LUAU_FASTMATH_BEGIN __pragma(float_control(precise, off, push))
#define LUAU_FASTMATH_END __pragma(float_control(pop))
#else
#define LUAU_FASTMATH_BEGIN
#define LUAU_FASTMATH_END
#endif
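// Hedged sketch of intended use: wrap a comparison-free numeric helper so the
// pragmas above apply to it (function name and body are illustrative).
LUAU_FASTMATH_BEGIN
static double mulAdd(double a, double b, double c)
{
    return a * b + c; // no floating point comparisons inside, so /fp:fast is safe here
}
LUAU_FASTMATH_END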
// Some functions like floor/ceil have SSE4.1 equivalents but we currently support systems without SSE4.1
// Note that we only need to do this when SSE4.1 support is not guaranteed by compiler settings, as otherwise compiler will optimize these for us.
#if (defined(__x86_64__) || defined(_M_X64)) && !defined(__SSE4_1__) && !defined(__AVX__)
#if defined(_MSC_VER) && !defined(__clang__)
#define LUAU_TARGET_SSE41
#elif defined(__GNUC__) && defined(__has_attribute)
#if __has_attribute(target)
#define LUAU_TARGET_SSE41 __attribute__((target("sse4.1")))
#endif
#endif
#endif
// Used on functions that have a printf-like interface to validate them statically
#if defined(__GNUC__)
#define LUA_PRINTF_ATTR(fmt, arg) __attribute__((format(printf, fmt, arg)))
#else
#define LUA_PRINTF_ATTR(fmt, arg)
#endif
#ifdef _MSC_VER
#define LUA_NORETURN __declspec(noreturn)
#else
#define LUA_NORETURN __attribute__((__noreturn__))
#endif
// Can be used to reconfigure visibility/exports for public APIs
#ifndef LUA_API
#define LUA_API extern
#endif
#define LUALIB_API LUA_API
// Can be used to reconfigure visibility for internal APIs
#if defined(__GNUC__)
#define LUAI_FUNC __attribute__((visibility("hidden"))) extern
#define LUAI_DATA LUAI_FUNC
#else
#define LUAI_FUNC extern
#define LUAI_DATA extern
#endif
// Can be used to reconfigure internal error handling to use longjmp instead of C++ EH
#ifndef LUA_USE_LONGJMP
#define LUA_USE_LONGJMP 0
#endif
// LUA_IDSIZE gives the maximum size for the description of the source
#ifndef LUA_IDSIZE
#define LUA_IDSIZE 256
#endif
// LUA_MINSTACK is the guaranteed number of Lua stack slots available to a C function
#ifndef LUA_MINSTACK
#define LUA_MINSTACK 20
#endif
// LUAI_MAXCSTACK limits the number of Lua stack slots that a C function can use
#ifndef LUAI_MAXCSTACK
#define LUAI_MAXCSTACK 8000
#endif
// LUAI_MAXCALLS limits the number of nested calls
#ifndef LUAI_MAXCALLS
#define LUAI_MAXCALLS 20000
#endif
// LUAI_MAXCCALLS is the maximum depth for nested C calls; this limit depends on native stack size
#ifndef LUAI_MAXCCALLS
#define LUAI_MAXCCALLS 200
#endif
// buffer size used for on-stack string operations; this limit depends on native stack size
#ifndef LUA_BUFFERSIZE
#define LUA_BUFFERSIZE 512
#endif
// number of valid Lua userdata tags
#ifndef LUA_UTAG_LIMIT
#define LUA_UTAG_LIMIT 128
#endif
// upper bound for number of size classes used by page allocator
#ifndef LUA_SIZECLASSES
#define LUA_SIZECLASSES 32
#endif
// available number of separate memory categories
#ifndef LUA_MEMORY_CATEGORIES
#define LUA_MEMORY_CATEGORIES 256
#endif
// minimum size for the string table (must be power of 2)
#ifndef LUA_MINSTRTABSIZE
#define LUA_MINSTRTABSIZE 32
#endif
// maximum number of captures supported by pattern matching
#ifndef LUA_MAXCAPTURES
#define LUA_MAXCAPTURES 32
#endif
// enables callbacks to redirect code execution from Luau VM to a custom implementation
#ifndef LUA_CUSTOM_EXECUTION
#define LUA_CUSTOM_EXECUTION 0
#endif
// }==================================================================
/*
@@ LUAI_USER_ALIGNMENT_T is a type that requires maximum alignment.
** CHANGE it if your system requires alignments larger than double. (For
** instance, if your system supports long doubles and they must be
** aligned in 16-byte boundaries, then you should add long double in the
** union.) Probably you do not need to change this.
*/
#define LUAI_USER_ALIGNMENT_T \
union \
{ \
double u; \
void* s; \
long l; \
}
#ifndef LUA_VECTOR_SIZE
#define LUA_VECTOR_SIZE 3 // must be 3 or 4
#endif
#define LUA_EXTRA_SIZE (LUA_VECTOR_SIZE - 2)

View File

@ -1,137 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lua.h"
#define luaL_error(L, fmt, ...) luaL_errorL(L, fmt, ##__VA_ARGS__)
#define luaL_typeerror(L, narg, tname) luaL_typeerrorL(L, narg, tname)
#define luaL_argerror(L, narg, extramsg) luaL_argerrorL(L, narg, extramsg)
struct luaL_Reg
{
const char* name;
lua_CFunction func;
};
typedef struct luaL_Reg luaL_Reg;
LUALIB_API void luaL_register(lua_State* L, const char* libname, const luaL_Reg* l);
LUALIB_API int luaL_getmetafield(lua_State* L, int obj, const char* e);
LUALIB_API int luaL_callmeta(lua_State* L, int obj, const char* e);
LUALIB_API l_noret luaL_typeerrorL(lua_State* L, int narg, const char* tname);
LUALIB_API l_noret luaL_argerrorL(lua_State* L, int narg, const char* extramsg);
LUALIB_API const char* luaL_checklstring(lua_State* L, int numArg, size_t* l);
LUALIB_API const char* luaL_optlstring(lua_State* L, int numArg, const char* def, size_t* l);
LUALIB_API double luaL_checknumber(lua_State* L, int numArg);
LUALIB_API double luaL_optnumber(lua_State* L, int nArg, double def);
LUALIB_API int luaL_checkboolean(lua_State* L, int narg);
LUALIB_API int luaL_optboolean(lua_State* L, int narg, int def);
LUALIB_API int luaL_checkinteger(lua_State* L, int numArg);
LUALIB_API int luaL_optinteger(lua_State* L, int nArg, int def);
LUALIB_API unsigned luaL_checkunsigned(lua_State* L, int numArg);
LUALIB_API unsigned luaL_optunsigned(lua_State* L, int numArg, unsigned def);
LUALIB_API const float* luaL_checkvector(lua_State* L, int narg);
LUALIB_API const float* luaL_optvector(lua_State* L, int narg, const float* def);
LUALIB_API void luaL_checkstack(lua_State* L, int sz, const char* msg);
LUALIB_API void luaL_checktype(lua_State* L, int narg, int t);
LUALIB_API void luaL_checkany(lua_State* L, int narg);
LUALIB_API int luaL_newmetatable(lua_State* L, const char* tname);
LUALIB_API void* luaL_checkudata(lua_State* L, int ud, const char* tname);
LUALIB_API void luaL_where(lua_State* L, int lvl);
LUALIB_API LUA_PRINTF_ATTR(2, 3) l_noret luaL_errorL(lua_State* L, const char* fmt, ...);
LUALIB_API int luaL_checkoption(lua_State* L, int narg, const char* def, const char* const lst[]);
LUALIB_API const char* luaL_tolstring(lua_State* L, int idx, size_t* len);
LUALIB_API lua_State* luaL_newstate(void);
LUALIB_API const char* luaL_findtable(lua_State* L, int idx, const char* fname, int szhint);
LUALIB_API const char* luaL_typename(lua_State* L, int idx);
/*
** ===============================================================
** some useful macros
** ===============================================================
*/
#define luaL_argcheck(L, cond, arg, extramsg) ((void)((cond) ? (void)0 : luaL_argerror(L, arg, extramsg)))
#define luaL_argexpected(L, cond, arg, tname) ((void)((cond) ? (void)0 : luaL_typeerror(L, arg, tname)))
#define luaL_checkstring(L, n) (luaL_checklstring(L, (n), NULL))
#define luaL_optstring(L, n, d) (luaL_optlstring(L, (n), (d), NULL))
#define luaL_getmetatable(L, n) (lua_getfield(L, LUA_REGISTRYINDEX, (n)))
#define luaL_opt(L, f, n, d) (lua_isnoneornil(L, (n)) ? (d) : f(L, (n)))
// generic buffer manipulation
struct luaL_Buffer
{
char* p; // current position in buffer
char* end; // end of the current buffer
lua_State* L;
struct TString* storage;
char buffer[LUA_BUFFERSIZE];
};
typedef struct luaL_Buffer luaL_Buffer;
// when internal buffer storage is exhausted, a mutable string value 'storage' will be placed on the stack
// in general, functions expect the mutable string buffer to be placed on top of the stack (top-1)
// with the exception of luaL_addvalue that expects the value at the top and string buffer further away (top-2)
// functions that accept a 'boxloc' support string buffer placement at any location in the stack
// all the buffer users we have in Luau match this pattern, but it's something to keep in mind for new uses of buffers
#define luaL_addchar(B, c) ((void)((B)->p < (B)->end || luaL_extendbuffer(B, 1, -1)), (*(B)->p++ = (char)(c)))
#define luaL_addstring(B, s) luaL_addlstring(B, s, strlen(s), -1)
LUALIB_API void luaL_buffinit(lua_State* L, luaL_Buffer* B);
LUALIB_API char* luaL_buffinitsize(lua_State* L, luaL_Buffer* B, size_t size);
LUALIB_API char* luaL_extendbuffer(luaL_Buffer* B, size_t additionalsize, int boxloc);
LUALIB_API void luaL_reservebuffer(luaL_Buffer* B, size_t size, int boxloc);
LUALIB_API void luaL_addlstring(luaL_Buffer* B, const char* s, size_t l, int boxloc);
LUALIB_API void luaL_addvalue(luaL_Buffer* B);
LUALIB_API void luaL_pushresult(luaL_Buffer* B);
LUALIB_API void luaL_pushresultsize(luaL_Buffer* B, size_t size);
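// Hedged usage sketch of the buffer API above (assumes a valid lua_State* 'L'
// and <string.h> for the strlen used by luaL_addstring):
luaL_Buffer b;
luaL_buffinit(L, &b);
luaL_addstring(&b, "hello, ");
luaL_addstring(&b, "world");
luaL_pushresult(&b); // the concatenated string is now on top of the stack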
// builtin libraries
LUALIB_API int luaopen_base(lua_State* L);
#define LUA_COLIBNAME "coroutine"
LUALIB_API int luaopen_coroutine(lua_State* L);
#define LUA_TABLIBNAME "table"
LUALIB_API int luaopen_table(lua_State* L);
#define LUA_OSLIBNAME "os"
LUALIB_API int luaopen_os(lua_State* L);
#define LUA_STRLIBNAME "string"
LUALIB_API int luaopen_string(lua_State* L);
#define LUA_BITLIBNAME "bit32"
LUALIB_API int luaopen_bit32(lua_State* L);
#define LUA_UTF8LIBNAME "utf8"
LUALIB_API int luaopen_utf8(lua_State* L);
#define LUA_MATHLIBNAME "math"
LUALIB_API int luaopen_math(lua_State* L);
#define LUA_DBLIBNAME "debug"
LUALIB_API int luaopen_debug(lua_State* L);
// open all builtin libraries
LUALIB_API void luaL_openlibs(lua_State* L);
// sandbox libraries and globals
LUALIB_API void luaL_sandbox(lua_State* L);
LUALIB_API void luaL_sandboxthread(lua_State* L);

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
LUAI_FUNC const TValue* luaA_toobject(lua_State* L, int idx);
LUAI_FUNC void luaA_pushobject(lua_State* L, const TValue* o);

View File

@ -1,530 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "lapi.h"
#include "lgc.h"
#include "lnumutils.h"
#include <string.h>
// convert a stack index to positive
#define abs_index(L, i) ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1)
/*
** {======================================================
** Error-report functions
** =======================================================
*/
static const char* currfuncname(lua_State* L)
{
Closure* cl = L->ci > L->base_ci ? curr_func(L) : NULL;
const char* debugname = cl && cl->isC ? cl->c.debugname + 0 : NULL;
if (debugname && strcmp(debugname, "__namecall") == 0)
return L->namecall ? getstr(L->namecall) : NULL;
else
return debugname;
}
l_noret luaL_argerrorL(lua_State* L, int narg, const char* extramsg)
{
const char* fname = currfuncname(L);
if (fname)
luaL_error(L, "invalid argument #%d to '%s' (%s)", narg, fname, extramsg);
else
luaL_error(L, "invalid argument #%d (%s)", narg, extramsg);
}
l_noret luaL_typeerrorL(lua_State* L, int narg, const char* tname)
{
const char* fname = currfuncname(L);
const TValue* obj = luaA_toobject(L, narg);
if (obj)
{
if (fname)
luaL_error(L, "invalid argument #%d to '%s' (%s expected, got %s)", narg, fname, tname, luaT_objtypename(L, obj));
else
luaL_error(L, "invalid argument #%d (%s expected, got %s)", narg, tname, luaT_objtypename(L, obj));
}
else
{
if (fname)
luaL_error(L, "missing argument #%d to '%s' (%s expected)", narg, fname, tname);
else
luaL_error(L, "missing argument #%d (%s expected)", narg, tname);
}
}
static l_noret tag_error(lua_State* L, int narg, int tag)
{
luaL_typeerrorL(L, narg, lua_typename(L, tag));
}
void luaL_where(lua_State* L, int level)
{
lua_Debug ar;
if (lua_getinfo(L, level, "sl", &ar) && ar.currentline > 0)
{
lua_pushfstring(L, "%s:%d: ", ar.short_src, ar.currentline);
return;
}
lua_pushliteral(L, ""); // else, no information available...
}
l_noret luaL_errorL(lua_State* L, const char* fmt, ...)
{
va_list argp;
va_start(argp, fmt);
luaL_where(L, 1);
lua_pushvfstring(L, fmt, argp);
va_end(argp);
lua_concat(L, 2);
lua_error(L);
}
// }======================================================
int luaL_checkoption(lua_State* L, int narg, const char* def, const char* const lst[])
{
const char* name = (def) ? luaL_optstring(L, narg, def) : luaL_checkstring(L, narg);
int i;
for (i = 0; lst[i]; i++)
if (strcmp(lst[i], name) == 0)
return i;
const char* msg = lua_pushfstring(L, "invalid option '%s'", name);
luaL_argerrorL(L, narg, msg);
}
int luaL_newmetatable(lua_State* L, const char* tname)
{
lua_getfield(L, LUA_REGISTRYINDEX, tname); // get registry.name
if (!lua_isnil(L, -1)) // name already in use?
return 0; // leave previous value on top, but return 0
lua_pop(L, 1);
lua_newtable(L); // create metatable
lua_pushvalue(L, -1);
lua_setfield(L, LUA_REGISTRYINDEX, tname); // registry.name = metatable
return 1;
}
void* luaL_checkudata(lua_State* L, int ud, const char* tname)
{
void* p = lua_touserdata(L, ud);
if (p != NULL)
{ // value is a userdata?
if (lua_getmetatable(L, ud))
{ // does it have a metatable?
lua_getfield(L, LUA_REGISTRYINDEX, tname); // get correct metatable
if (lua_rawequal(L, -1, -2))
{ // does it have the correct mt?
lua_pop(L, 2); // remove both metatables
return p;
}
}
}
luaL_typeerrorL(L, ud, tname); // else error
}
void luaL_checkstack(lua_State* L, int space, const char* mes)
{
if (!lua_checkstack(L, space))
luaL_error(L, "stack overflow (%s)", mes);
}
void luaL_checktype(lua_State* L, int narg, int t)
{
if (lua_type(L, narg) != t)
tag_error(L, narg, t);
}
void luaL_checkany(lua_State* L, int narg)
{
if (lua_type(L, narg) == LUA_TNONE)
luaL_error(L, "missing argument #%d", narg);
}
const char* luaL_checklstring(lua_State* L, int narg, size_t* len)
{
const char* s = lua_tolstring(L, narg, len);
if (!s)
tag_error(L, narg, LUA_TSTRING);
return s;
}
const char* luaL_optlstring(lua_State* L, int narg, const char* def, size_t* len)
{
if (lua_isnoneornil(L, narg))
{
if (len)
*len = (def ? strlen(def) : 0);
return def;
}
else
return luaL_checklstring(L, narg, len);
}
double luaL_checknumber(lua_State* L, int narg)
{
int isnum;
double d = lua_tonumberx(L, narg, &isnum);
if (!isnum)
tag_error(L, narg, LUA_TNUMBER);
return d;
}
double luaL_optnumber(lua_State* L, int narg, double def)
{
return luaL_opt(L, luaL_checknumber, narg, def);
}
int luaL_checkboolean(lua_State* L, int narg)
{
// This checks specifically for boolean values, ignoring
// all other truthy/falsy values. If the desired result
// is simply whether a value is present, then lua_toboolean
// should be used directly instead.
if (!lua_isboolean(L, narg))
tag_error(L, narg, LUA_TBOOLEAN);
return lua_toboolean(L, narg);
}
int luaL_optboolean(lua_State* L, int narg, int def)
{
return luaL_opt(L, luaL_checkboolean, narg, def);
}
int luaL_checkinteger(lua_State* L, int narg)
{
int isnum;
int d = lua_tointegerx(L, narg, &isnum);
if (!isnum)
tag_error(L, narg, LUA_TNUMBER);
return d;
}
int luaL_optinteger(lua_State* L, int narg, int def)
{
return luaL_opt(L, luaL_checkinteger, narg, def);
}
unsigned luaL_checkunsigned(lua_State* L, int narg)
{
int isnum;
unsigned d = lua_tounsignedx(L, narg, &isnum);
if (!isnum)
tag_error(L, narg, LUA_TNUMBER);
return d;
}
unsigned luaL_optunsigned(lua_State* L, int narg, unsigned def)
{
return luaL_opt(L, luaL_checkunsigned, narg, def);
}
const float* luaL_checkvector(lua_State* L, int narg)
{
const float* v = lua_tovector(L, narg);
if (!v)
tag_error(L, narg, LUA_TVECTOR);
return v;
}
const float* luaL_optvector(lua_State* L, int narg, const float* def)
{
return luaL_opt(L, luaL_checkvector, narg, def);
}
int luaL_getmetafield(lua_State* L, int obj, const char* event)
{
if (!lua_getmetatable(L, obj)) // no metatable?
return 0;
lua_pushstring(L, event);
lua_rawget(L, -2);
if (lua_isnil(L, -1))
{
lua_pop(L, 2); // remove metatable and metafield
return 0;
}
else
{
lua_remove(L, -2); // remove only metatable
return 1;
}
}
int luaL_callmeta(lua_State* L, int obj, const char* event)
{
obj = abs_index(L, obj);
if (!luaL_getmetafield(L, obj, event)) // no metafield?
return 0;
lua_pushvalue(L, obj);
lua_call(L, 1, 1);
return 1;
}
static int libsize(const luaL_Reg* l)
{
int size = 0;
for (; l->name; l++)
size++;
return size;
}
void luaL_register(lua_State* L, const char* libname, const luaL_Reg* l)
{
if (libname)
{
int size = libsize(l);
// check whether lib already exists
luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 1);
lua_getfield(L, -1, libname); // get _LOADED[libname]
if (!lua_istable(L, -1))
{ // not found?
lua_pop(L, 1); // remove previous result
// try global variable (and create one if it does not exist)
if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL)
luaL_error(L, "name conflict for module '%s'", libname);
lua_pushvalue(L, -1);
lua_setfield(L, -3, libname); // _LOADED[libname] = new table
}
lua_remove(L, -2); // remove _LOADED table
}
for (; l->name; l++)
{
lua_pushcfunction(L, l->func, l->name);
lua_setfield(L, -2, l->name);
}
}
const char* luaL_findtable(lua_State* L, int idx, const char* fname, int szhint)
{
const char* e;
lua_pushvalue(L, idx);
do
{
e = strchr(fname, '.');
if (e == NULL)
e = fname + strlen(fname);
lua_pushlstring(L, fname, e - fname);
lua_rawget(L, -2);
if (lua_isnil(L, -1))
{ // no such field?
lua_pop(L, 1); // remove this nil
lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); // new table for field
lua_pushlstring(L, fname, e - fname);
lua_pushvalue(L, -2);
lua_settable(L, -4); // set new table into field
}
else if (!lua_istable(L, -1))
{ // field has a non-table value?
lua_pop(L, 2); // remove table and value
return fname; // return problematic part of the name
}
lua_remove(L, -2); // remove previous table
fname = e + 1;
} while (*e == '.');
return NULL;
}
const char* luaL_typename(lua_State* L, int idx)
{
const TValue* obj = luaA_toobject(L, idx);
return luaT_objtypename(L, obj);
}
/*
** {======================================================
** Generic Buffer manipulation
** =======================================================
*/
static size_t getnextbuffersize(lua_State* L, size_t currentsize, size_t desiredsize)
{
size_t newsize = currentsize + currentsize / 2;
// check for size overflow
if (SIZE_MAX - desiredsize < currentsize)
luaL_error(L, "buffer too large");
// growth factor might not be enough to satisfy the desired size
if (newsize < desiredsize)
newsize = desiredsize;
return newsize;
}
void luaL_buffinit(lua_State* L, luaL_Buffer* B)
{
// start with an internal buffer
B->p = B->buffer;
B->end = B->p + LUA_BUFFERSIZE;
B->L = L;
B->storage = nullptr;
}
char* luaL_buffinitsize(lua_State* L, luaL_Buffer* B, size_t size)
{
luaL_buffinit(L, B);
luaL_reservebuffer(B, size, -1);
return B->p;
}
char* luaL_extendbuffer(luaL_Buffer* B, size_t additionalsize, int boxloc)
{
lua_State* L = B->L;
if (B->storage)
LUAU_ASSERT(B->storage == tsvalue(L->top + boxloc));
char* base = B->storage ? B->storage->data : B->buffer;
size_t capacity = B->end - base;
size_t nextsize = getnextbuffersize(B->L, capacity, capacity + additionalsize);
TString* newStorage = luaS_bufstart(L, nextsize);
memcpy(newStorage->data, base, B->p - base);
// place the string storage at the expected position in the stack
if (base == B->buffer)
{
lua_pushnil(L);
lua_insert(L, boxloc);
}
setsvalue(L, L->top + boxloc, newStorage);
B->p = newStorage->data + (B->p - base);
B->end = newStorage->data + nextsize;
B->storage = newStorage;
return B->p;
}
void luaL_reservebuffer(luaL_Buffer* B, size_t size, int boxloc)
{
if (size_t(B->end - B->p) < size)
luaL_extendbuffer(B, size - (B->end - B->p), boxloc);
}
void luaL_addlstring(luaL_Buffer* B, const char* s, size_t len, int boxloc)
{
if (size_t(B->end - B->p) < len)
luaL_extendbuffer(B, len - (B->end - B->p), boxloc);
memcpy(B->p, s, len);
B->p += len;
}
void luaL_addvalue(luaL_Buffer* B)
{
lua_State* L = B->L;
size_t vl;
if (const char* s = lua_tolstring(L, -1, &vl))
{
if (size_t(B->end - B->p) < vl)
luaL_extendbuffer(B, vl - (B->end - B->p), -2);
memcpy(B->p, s, vl);
B->p += vl;
lua_pop(L, 1);
}
}
void luaL_pushresult(luaL_Buffer* B)
{
lua_State* L = B->L;
if (TString* storage = B->storage)
{
luaC_checkGC(L);
// if we finished just at the end of the string buffer, we can convert it to a mutable string without a copy
if (B->p == B->end)
{
setsvalue(L, L->top - 1, luaS_buffinish(L, storage));
}
else
{
setsvalue(L, L->top - 1, luaS_newlstr(L, storage->data, B->p - storage->data));
}
}
else
{
lua_pushlstring(L, B->buffer, B->p - B->buffer);
}
}
void luaL_pushresultsize(luaL_Buffer* B, size_t size)
{
B->p += size;
luaL_pushresult(B);
}
// }======================================================
const char* luaL_tolstring(lua_State* L, int idx, size_t* len)
{
if (luaL_callmeta(L, idx, "__tostring")) // is there a metafield?
{
if (!lua_isstring(L, -1))
luaL_error(L, "'__tostring' must return a string");
return lua_tolstring(L, -1, len);
}
switch (lua_type(L, idx))
{
case LUA_TNUMBER:
{
double n = lua_tonumber(L, idx);
char s[LUAI_MAXNUM2STR];
char* e = luai_num2str(s, n);
lua_pushlstring(L, s, e - s);
break;
}
case LUA_TSTRING:
lua_pushvalue(L, idx);
break;
case LUA_TBOOLEAN:
lua_pushstring(L, (lua_toboolean(L, idx) ? "true" : "false"));
break;
case LUA_TNIL:
lua_pushliteral(L, "nil");
break;
case LUA_TVECTOR:
{
const float* v = lua_tovector(L, idx);
char s[LUAI_MAXNUM2STR * LUA_VECTOR_SIZE];
char* e = s;
for (int i = 0; i < LUA_VECTOR_SIZE; ++i)
{
if (i != 0)
{
*e++ = ',';
*e++ = ' ';
}
e = luai_num2str(e, v[i]);
}
lua_pushlstring(L, s, e - s);
break;
}
default:
{
const void* ptr = lua_topointer(L, idx);
unsigned long long enc = lua_encodepointer(L, uintptr_t(ptr));
lua_pushfstring(L, "%s: 0x%016llx", luaL_typename(L, idx), enc);
break;
}
}
return lua_tolstring(L, -1, len);
}

View File

@ -1,480 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lstate.h"
#include "lapi.h"
#include "ldo.h"
#include "ludata.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
static void writestring(const char* s, size_t l)
{
fwrite(s, 1, l, stdout);
}
static int luaB_print(lua_State* L)
{
int n = lua_gettop(L); // number of arguments
for (int i = 1; i <= n; i++)
{
size_t l;
const char* s = luaL_tolstring(L, i, &l); // convert to string using __tostring et al
if (i > 1)
writestring("\t", 1);
writestring(s, l);
lua_pop(L, 1); // pop result
}
writestring("\n", 1);
return 0;
}
static int luaB_tonumber(lua_State* L)
{
int base = luaL_optinteger(L, 2, 10);
if (base == 10)
{ // standard conversion
int isnum = 0;
double n = lua_tonumberx(L, 1, &isnum);
if (isnum)
{
lua_pushnumber(L, n);
return 1;
}
luaL_checkany(L, 1); // error if we don't have any argument
}
else
{
const char* s1 = luaL_checkstring(L, 1);
luaL_argcheck(L, 2 <= base && base <= 36, 2, "base out of range");
char* s2;
unsigned long long n;
n = strtoull(s1, &s2, base);
if (s1 != s2)
{ // at least one valid digit?
while (isspace((unsigned char)(*s2)))
s2++; // skip trailing spaces
if (*s2 == '\0')
{ // no invalid trailing characters?
lua_pushnumber(L, (double)n);
return 1;
}
}
}
lua_pushnil(L); // else not a number
return 1;
}
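// Illustrative sketch (not part of the original file): exercising the
// base-16 path above through the C API; assumes a state with the base
// library already open. 'tonumber_base16_demo' is a made-up name.
static void tonumber_base16_demo(lua_State* L)
{
    lua_getglobal(L, "tonumber");
    lua_pushstring(L, "ff");
    lua_pushinteger(L, 16);
    lua_call(L, 2, 1); // leaves the number 255 on top of the stack
    LUAU_ASSERT(lua_tonumber(L, -1) == 255.0);
    lua_pop(L, 1);
}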
static int luaB_error(lua_State* L)
{
int level = luaL_optinteger(L, 2, 1);
lua_settop(L, 1);
if (lua_isstring(L, 1) && level > 0)
{ // add extra information?
luaL_where(L, level);
lua_pushvalue(L, 1);
lua_concat(L, 2);
}
lua_error(L);
}
static int luaB_getmetatable(lua_State* L)
{
luaL_checkany(L, 1);
if (!lua_getmetatable(L, 1))
{
lua_pushnil(L);
return 1; // no metatable
}
luaL_getmetafield(L, 1, "__metatable");
return 1; // returns either __metatable field (if present) or metatable
}
static int luaB_setmetatable(lua_State* L)
{
int t = lua_type(L, 2);
luaL_checktype(L, 1, LUA_TTABLE);
luaL_argexpected(L, t == LUA_TNIL || t == LUA_TTABLE, 2, "nil or table");
if (luaL_getmetafield(L, 1, "__metatable"))
luaL_error(L, "cannot change a protected metatable");
lua_settop(L, 2);
lua_setmetatable(L, 1);
return 1;
}
static void getfunc(lua_State* L, int opt)
{
if (lua_isfunction(L, 1))
lua_pushvalue(L, 1);
else
{
lua_Debug ar;
int level = opt ? luaL_optinteger(L, 1, 1) : luaL_checkinteger(L, 1);
luaL_argcheck(L, level >= 0, 1, "level must be non-negative");
if (lua_getinfo(L, level, "f", &ar) == 0)
luaL_argerror(L, 1, "invalid level");
if (lua_isnil(L, -1))
luaL_error(L, "no function environment for tail call at level %d", level);
}
}
static int luaB_getfenv(lua_State* L)
{
getfunc(L, 1);
if (lua_iscfunction(L, -1)) // is a C function?
lua_pushvalue(L, LUA_GLOBALSINDEX); // return the thread's global env.
else
lua_getfenv(L, -1);
lua_setsafeenv(L, -1, false);
return 1;
}
static int luaB_setfenv(lua_State* L)
{
luaL_checktype(L, 2, LUA_TTABLE);
getfunc(L, 0);
lua_pushvalue(L, 2);
lua_setsafeenv(L, -1, false);
if (lua_isnumber(L, 1) && lua_tonumber(L, 1) == 0)
{
// change environment of current thread
lua_pushthread(L);
lua_insert(L, -2);
lua_setfenv(L, -2);
return 0;
}
else if (lua_iscfunction(L, -2) || lua_setfenv(L, -2) == 0)
luaL_error(L, "'setfenv' cannot change environment of given object");
return 1;
}
static int luaB_rawequal(lua_State* L)
{
luaL_checkany(L, 1);
luaL_checkany(L, 2);
lua_pushboolean(L, lua_rawequal(L, 1, 2));
return 1;
}
static int luaB_rawget(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
luaL_checkany(L, 2);
lua_settop(L, 2);
lua_rawget(L, 1);
return 1;
}
static int luaB_rawset(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
luaL_checkany(L, 2);
luaL_checkany(L, 3);
lua_settop(L, 3);
lua_rawset(L, 1);
return 1;
}
static int luaB_rawlen(lua_State* L)
{
int tt = lua_type(L, 1);
luaL_argcheck(L, tt == LUA_TTABLE || tt == LUA_TSTRING, 1, "table or string expected");
int len = lua_objlen(L, 1);
lua_pushinteger(L, len);
return 1;
}
static int luaB_gcinfo(lua_State* L)
{
lua_pushinteger(L, lua_gc(L, LUA_GCCOUNT, 0));
return 1;
}
static int luaB_type(lua_State* L)
{
luaL_checkany(L, 1);
// resulting name doesn't differentiate between userdata types
lua_pushstring(L, lua_typename(L, lua_type(L, 1)));
return 1;
}
static int luaB_typeof(lua_State* L)
{
luaL_checkany(L, 1);
// the resulting name uses the __type metafield if present, unless the input is a newproxy-created userdata
lua_pushstring(L, luaL_typename(L, 1));
return 1;
}
int luaB_next(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
lua_settop(L, 2); // create a 2nd argument if there isn't one
if (lua_next(L, 1))
return 2;
else
{
lua_pushnil(L);
return 1;
}
}
static int luaB_pairs(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
lua_pushvalue(L, lua_upvalueindex(1)); // return generator,
lua_pushvalue(L, 1); // state,
lua_pushnil(L); // and initial value
return 3;
}
int luaB_inext(lua_State* L)
{
int i = luaL_checkinteger(L, 2);
luaL_checktype(L, 1, LUA_TTABLE);
i++; // next value
lua_pushinteger(L, i);
lua_rawgeti(L, 1, i);
return (lua_isnil(L, -1)) ? 0 : 2;
}
static int luaB_ipairs(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
lua_pushvalue(L, lua_upvalueindex(1)); // return generator,
lua_pushvalue(L, 1); // state,
lua_pushinteger(L, 0); // and initial value
return 3;
}
static int luaB_assert(lua_State* L)
{
luaL_checkany(L, 1);
if (!lua_toboolean(L, 1))
luaL_error(L, "%s", luaL_optstring(L, 2, "assertion failed!"));
return lua_gettop(L);
}
static int luaB_select(lua_State* L)
{
int n = lua_gettop(L);
if (lua_type(L, 1) == LUA_TSTRING && *lua_tostring(L, 1) == '#')
{
lua_pushinteger(L, n - 1);
return 1;
}
else
{
int i = luaL_checkinteger(L, 1);
if (i < 0)
i = n + i;
else if (i > n)
i = n;
luaL_argcheck(L, 1 <= i, 1, "index out of range");
return n - i;
}
}
static void luaB_pcallrun(lua_State* L, void* ud)
{
StkId func = (StkId)ud;
luaD_call(L, func, LUA_MULTRET);
}
static int luaB_pcally(lua_State* L)
{
luaL_checkany(L, 1);
StkId func = L->base;
// any errors from this point on are handled by continuation
L->ci->flags |= LUA_CALLINFO_HANDLE;
// maintain yieldable invariant (baseCcalls <= nCcalls)
L->baseCcalls++;
int status = luaD_pcall(L, luaB_pcallrun, func, savestack(L, func), 0);
L->baseCcalls--;
// necessary to accommodate functions that return lots of values
expandstacklimit(L, L->top);
// yielding means we need to propagate yield; resume will call continuation function later
if (status == 0 && (L->status == LUA_YIELD || L->status == LUA_BREAK))
return -1; // -1 is a marker for yielding from C
// immediate return (error or success)
lua_rawcheckstack(L, 1);
lua_pushboolean(L, status == 0);
lua_insert(L, 1);
return lua_gettop(L); // return status + all results
}
static int luaB_pcallcont(lua_State* L, int status)
{
if (status == 0)
{
lua_rawcheckstack(L, 1);
lua_pushboolean(L, true);
lua_insert(L, 1); // insert status before all results
return lua_gettop(L);
}
else
{
lua_rawcheckstack(L, 1);
lua_pushboolean(L, false);
lua_insert(L, -2); // insert status before error object
return 2;
}
}
static int luaB_xpcally(lua_State* L)
{
luaL_checktype(L, 2, LUA_TFUNCTION);
// swap function & error function
lua_pushvalue(L, 1);
lua_pushvalue(L, 2);
lua_replace(L, 1);
lua_replace(L, 2);
// at this point the stack looks like err, f, args
// any errors from this point on are handled by continuation
L->ci->flags |= LUA_CALLINFO_HANDLE;
StkId errf = L->base;
StkId func = L->base + 1;
// maintain yieldable invariant (baseCcalls <= nCcalls)
L->baseCcalls++;
int status = luaD_pcall(L, luaB_pcallrun, func, savestack(L, func), savestack(L, errf));
L->baseCcalls--;
// necessary to accommodate functions that return lots of values
expandstacklimit(L, L->top);
// yielding means we need to propagate yield; resume will call continuation function later
if (status == 0 && (L->status == LUA_YIELD || L->status == LUA_BREAK))
return -1; // -1 is a marker for yielding from C
// immediate return (error or success)
lua_rawcheckstack(L, 1);
lua_pushboolean(L, status == 0);
lua_replace(L, 1); // replace error function with status
return lua_gettop(L); // return status + all results
}
static void luaB_xpcallerr(lua_State* L, void* ud)
{
StkId func = (StkId)ud;
luaD_call(L, func, 1);
}
static int luaB_xpcallcont(lua_State* L, int status)
{
if (status == 0)
{
lua_rawcheckstack(L, 1);
lua_pushboolean(L, true);
lua_replace(L, 1); // replace error function with status
return lua_gettop(L); // return status + all results
}
else
{
lua_rawcheckstack(L, 3);
lua_pushboolean(L, false);
lua_pushvalue(L, 1); // push error function on top of the stack
lua_pushvalue(L, -3); // push error object (that was on top of the stack before)
StkId res = L->top - 3;
StkId errf = L->top - 2;
// note: we pass res as errfunc as a shortcut; if errf generates an error, we'll try to execute res (a boolean) and fail
luaD_pcall(L, luaB_xpcallerr, errf, savestack(L, errf), savestack(L, res));
return 2;
}
}
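// Illustrative note (not in the original source): because pcall/xpcall are
// registered with lua_pushcclosurek in luaopen_base below, a yield inside the
// protected call (the -1 marker above) suspends the thread, and lua_resume
// later re-enters through luaB_pcallcont/luaB_xpcallcont instead of
// restarting the C frame.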
static int luaB_tostring(lua_State* L)
{
luaL_checkany(L, 1);
luaL_tolstring(L, 1, NULL);
return 1;
}
static int luaB_newproxy(lua_State* L)
{
int t = lua_type(L, 1);
luaL_argexpected(L, t == LUA_TNONE || t == LUA_TNIL || t == LUA_TBOOLEAN, 1, "nil or boolean");
bool needsmt = lua_toboolean(L, 1);
lua_newuserdatatagged(L, 0, UTAG_PROXY);
if (needsmt)
{
lua_newtable(L);
lua_setmetatable(L, -2);
}
return 1;
}
static const luaL_Reg base_funcs[] = {
{"assert", luaB_assert},
{"error", luaB_error},
{"gcinfo", luaB_gcinfo},
{"getfenv", luaB_getfenv},
{"getmetatable", luaB_getmetatable},
{"next", luaB_next},
{"newproxy", luaB_newproxy},
{"print", luaB_print},
{"rawequal", luaB_rawequal},
{"rawget", luaB_rawget},
{"rawset", luaB_rawset},
{"rawlen", luaB_rawlen},
{"select", luaB_select},
{"setfenv", luaB_setfenv},
{"setmetatable", luaB_setmetatable},
{"tonumber", luaB_tonumber},
{"tostring", luaB_tostring},
{"type", luaB_type},
{"typeof", luaB_typeof},
{NULL, NULL},
};
static void auxopen(lua_State* L, const char* name, lua_CFunction f, lua_CFunction u)
{
lua_pushcfunction(L, u, NULL);
lua_pushcclosure(L, f, name, 1);
lua_setfield(L, -2, name);
}
int luaopen_base(lua_State* L)
{
// set global _G
lua_pushvalue(L, LUA_GLOBALSINDEX);
lua_setglobal(L, "_G");
// open lib into global table
luaL_register(L, "_G", base_funcs);
lua_pushliteral(L, "Luau");
lua_setglobal(L, "_VERSION"); // set global _VERSION
// `ipairs' and `pairs' need auxiliary functions as upvalues
auxopen(L, "ipairs", luaB_ipairs, luaB_inext);
auxopen(L, "pairs", luaB_pairs, luaB_next);
lua_pushcclosurek(L, luaB_pcally, "pcall", 0, luaB_pcallcont);
lua_setfield(L, -2, "pcall");
lua_pushcclosurek(L, luaB_xpcally, "xpcall", 0, luaB_xpcallcont);
lua_setfield(L, -2, "xpcall");
return 1;
}

View File

@ -1,236 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lcommon.h"
#include "lnumutils.h"
#define ALLONES ~0u
#define NBITS int(8 * sizeof(unsigned))
// macro to trim extra bits
#define trim(x) ((x)&ALLONES)
// builds a number with 'n' ones (1 <= n <= NBITS)
#define mask(n) (~((ALLONES << 1) << ((n)-1)))
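// e.g. mask(3) == 0x7; the split shift keeps mask(NBITS) == ALLONES without
// ever shifting by NBITS, which would be undefined behavior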
typedef unsigned b_uint;
static b_uint andaux(lua_State* L)
{
int i, n = lua_gettop(L);
b_uint r = ~(b_uint)0;
for (i = 1; i <= n; i++)
r &= luaL_checkunsigned(L, i);
return trim(r);
}
static int b_and(lua_State* L)
{
b_uint r = andaux(L);
lua_pushunsigned(L, r);
return 1;
}
static int b_test(lua_State* L)
{
b_uint r = andaux(L);
lua_pushboolean(L, r != 0);
return 1;
}
static int b_or(lua_State* L)
{
int i, n = lua_gettop(L);
b_uint r = 0;
for (i = 1; i <= n; i++)
r |= luaL_checkunsigned(L, i);
lua_pushunsigned(L, trim(r));
return 1;
}
static int b_xor(lua_State* L)
{
int i, n = lua_gettop(L);
b_uint r = 0;
for (i = 1; i <= n; i++)
r ^= luaL_checkunsigned(L, i);
lua_pushunsigned(L, trim(r));
return 1;
}
static int b_not(lua_State* L)
{
b_uint r = ~luaL_checkunsigned(L, 1);
lua_pushunsigned(L, trim(r));
return 1;
}
static int b_shift(lua_State* L, b_uint r, int i)
{
if (i < 0)
{ // shift right?
i = -i;
r = trim(r);
if (i >= NBITS)
r = 0;
else
r >>= i;
}
else
{ // shift left
if (i >= NBITS)
r = 0;
else
r <<= i;
r = trim(r);
}
lua_pushunsigned(L, r);
return 1;
}
static int b_lshift(lua_State* L)
{
return b_shift(L, luaL_checkunsigned(L, 1), luaL_checkinteger(L, 2));
}
static int b_rshift(lua_State* L)
{
return b_shift(L, luaL_checkunsigned(L, 1), -luaL_checkinteger(L, 2));
}
static int b_arshift(lua_State* L)
{
b_uint r = luaL_checkunsigned(L, 1);
int i = luaL_checkinteger(L, 2);
if (i < 0 || !(r & ((b_uint)1 << (NBITS - 1))))
return b_shift(L, r, -i);
else
{ // arithmetic shift for 'negative' number
if (i >= NBITS)
r = ALLONES;
else
r = trim((r >> i) | ~(~(b_uint)0 >> i)); // sign-extend by filling the vacated high bits
lua_pushunsigned(L, r);
return 1;
}
}
static int b_rot(lua_State* L, int i)
{
b_uint r = luaL_checkunsigned(L, 1);
i &= (NBITS - 1); // i = i % NBITS
r = trim(r);
if (i != 0) // avoid undefined shift of NBITS when i == 0
r = (r << i) | (r >> (NBITS - i));
lua_pushunsigned(L, trim(r));
return 1;
}
static int b_lrot(lua_State* L)
{
return b_rot(L, luaL_checkinteger(L, 2));
}
static int b_rrot(lua_State* L)
{
return b_rot(L, -luaL_checkinteger(L, 2));
}
/*
** get field and width arguments for field-manipulation functions,
** checking whether they are valid.
** ('luaL_error' called without 'return' to avoid later warnings about
** 'width' being used uninitialized.)
*/
static int fieldargs(lua_State* L, int farg, int* width)
{
int f = luaL_checkinteger(L, farg);
int w = luaL_optinteger(L, farg + 1, 1);
luaL_argcheck(L, 0 <= f, farg, "field cannot be negative");
luaL_argcheck(L, 0 < w, farg + 1, "width must be positive");
if (f + w > NBITS)
luaL_error(L, "trying to access non-existent bits");
*width = w;
return f;
}
static int b_extract(lua_State* L)
{
int w;
b_uint r = luaL_checkunsigned(L, 1);
int f = fieldargs(L, 2, &w);
r = (r >> f) & mask(w);
lua_pushunsigned(L, r);
return 1;
}
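// e.g. extract(0xABCD, 4, 8) == (0xABCD >> 4) & 0xFF == 0xBC (bits 4..11)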
static int b_replace(lua_State* L)
{
int w;
b_uint r = luaL_checkunsigned(L, 1);
b_uint v = luaL_checkunsigned(L, 2);
int f = fieldargs(L, 3, &w);
int m = mask(w);
v &= m; // erase bits outside given width
r = (r & ~(m << f)) | (v << f);
lua_pushunsigned(L, r);
return 1;
}
static int b_countlz(lua_State* L)
{
b_uint v = luaL_checkunsigned(L, 1);
b_uint r = NBITS;
for (int i = 0; i < NBITS; ++i)
if (v & (1u << (NBITS - 1 - i)))
{
r = i;
break;
}
lua_pushunsigned(L, r);
return 1;
}
static int b_countrz(lua_State* L)
{
b_uint v = luaL_checkunsigned(L, 1);
b_uint r = NBITS;
for (int i = 0; i < NBITS; ++i)
if (v & (1u << i))
{
r = i;
break;
}
lua_pushunsigned(L, r);
return 1;
}
static const luaL_Reg bitlib[] = {
{"arshift", b_arshift},
{"band", b_and},
{"bnot", b_not},
{"bor", b_or},
{"bxor", b_xor},
{"btest", b_test},
{"extract", b_extract},
{"lrotate", b_lrot},
{"lshift", b_lshift},
{"replace", b_replace},
{"rrotate", b_rrot},
{"rshift", b_rshift},
{"countlz", b_countlz},
{"countrz", b_countrz},
{NULL, NULL},
};
int luaopen_bit32(lua_State* L)
{
luaL_register(L, LUA_BITLIBNAME, bitlib);
return 1;
}
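// Illustrative smoke test (not part of the original file), assuming a state
// where luaopen_bit32 has already run; 'bit32_extract_demo' is a made-up name.
static void bit32_extract_demo(lua_State* L)
{
    lua_getglobal(L, "bit32");
    lua_getfield(L, -1, "extract");
    lua_pushunsigned(L, 0xABCDu);
    lua_pushinteger(L, 4);
    lua_pushinteger(L, 8);
    lua_call(L, 3, 1);
    LUAU_ASSERT(lua_tounsigned(L, -1) == 0xBCu); // bits 4..11 of 0xABCD
    lua_pop(L, 2); // pop the result and the bit32 table
}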

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
typedef int (*luau_FastFunction)(lua_State* L, StkId res, TValue* arg0, int nresults, StkId args, int nparams);
extern const luau_FastFunction luauF_table[256];

View File

@ -1,6 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
// This is a forwarding header for Luau bytecode definition
#include "Luau/Bytecode.h"

View File

@ -1,48 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include <limits.h>
#include <stdint.h>
#include "luaconf.h"
#include "Luau/Common.h"
typedef LUAI_USER_ALIGNMENT_T L_Umaxalign;
// internal assertions for in-house debugging
#define check_exp(c, e) (LUAU_ASSERT(c), (e))
#define api_check(l, e) LUAU_ASSERT(e)
#ifndef cast_to
#define cast_to(t, exp) ((t)(exp))
#endif
#define cast_byte(i) cast_to(uint8_t, (i))
#define cast_num(i) cast_to(double, (i))
#define cast_int(i) cast_to(int, (i))
/*
** type for virtual-machine instructions
** must be an unsigned with (at least) 4 bytes (see details in lopcodes.h)
*/
typedef uint32_t Instruction;
/*
** macro to control inclusion of some hard tests on stack reallocation
*/
#if defined(HARDSTACKTESTS) && HARDSTACKTESTS
#define condhardstacktests(x) (x)
#else
#define condhardstacktests(x) ((void)0)
#endif
/*
** macro to control inclusion of some hard tests on garbage collection
*/
#if defined(HARDMEMTESTS) && HARDMEMTESTS
#define condhardmemtests(x, l) (HARDMEMTESTS >= l ? (x) : (void)0)
#else
#define condhardmemtests(x, l) ((void)0)
#endif

View File

@ -1,256 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lstate.h"
#include "lvm.h"
#define CO_STATUS_ERROR -1
#define CO_STATUS_BREAK -2
static const char* const statnames[] = {"running", "suspended", "normal", "dead", "dead"}; // dead appears twice for LUA_COERR and LUA_COFIN
static int costatus(lua_State* L)
{
lua_State* co = lua_tothread(L, 1);
luaL_argexpected(L, co, 1, "thread");
lua_pushstring(L, statnames[lua_costatus(L, co)]);
return 1;
}
static int auxresume(lua_State* L, lua_State* co, int narg)
{
// error handling for edge cases
if (co->status != LUA_YIELD)
{
int status = lua_costatus(L, co);
if (status != LUA_COSUS)
{
lua_pushfstring(L, "cannot resume %s coroutine", statnames[status]);
return CO_STATUS_ERROR;
}
}
if (narg)
{
if (!lua_checkstack(co, narg))
luaL_error(L, "too many arguments to resume");
lua_xmove(L, co, narg);
}
co->singlestep = L->singlestep;
int status = lua_resume(co, L, narg);
if (status == 0 || status == LUA_YIELD)
{
int nres = cast_int(co->top - co->base);
if (nres)
{
// +1 accounts for true/false status in resumefinish
if (nres + 1 > LUA_MINSTACK && !lua_checkstack(L, nres + 1))
luaL_error(L, "too many results to resume");
lua_xmove(co, L, nres); // move yielded values
}
return nres;
}
else if (status == LUA_BREAK)
{
return CO_STATUS_BREAK;
}
else
{
lua_xmove(co, L, 1); // move error message
return CO_STATUS_ERROR;
}
}
static int interruptThread(lua_State* L, lua_State* co)
{
// notify the debugger that the thread was suspended
if (L->global->cb.debuginterrupt)
luau_callhook(L, L->global->cb.debuginterrupt, co);
return lua_break(L);
}
static int auxresumecont(lua_State* L, lua_State* co)
{
if (co->status == 0 || co->status == LUA_YIELD)
{
int nres = cast_int(co->top - co->base);
if (!lua_checkstack(L, nres + 1))
luaL_error(L, "too many results to resume");
lua_xmove(co, L, nres); // move yielded values
return nres;
}
else
{
lua_rawcheckstack(L, 2);
lua_xmove(co, L, 1); // move error message
return CO_STATUS_ERROR;
}
}
static int coresumefinish(lua_State* L, int r)
{
if (r < 0)
{
lua_pushboolean(L, 0);
lua_insert(L, -2);
return 2; // return false + error message
}
else
{
lua_pushboolean(L, 1);
lua_insert(L, -(r + 1));
return r + 1; // return true + `resume' returns
}
}
static int coresumey(lua_State* L)
{
lua_State* co = lua_tothread(L, 1);
luaL_argexpected(L, co, 1, "thread");
int narg = cast_int(L->top - L->base) - 1;
int r = auxresume(L, co, narg);
if (r == CO_STATUS_BREAK)
return interruptThread(L, co);
return coresumefinish(L, r);
}
static int coresumecont(lua_State* L, int status)
{
lua_State* co = lua_tothread(L, 1);
luaL_argexpected(L, co, 1, "thread");
// if coroutine still hasn't yielded after the break, break current thread again
if (co->status == LUA_BREAK)
return interruptThread(L, co);
int r = auxresumecont(L, co);
return coresumefinish(L, r);
}
static int auxwrapfinish(lua_State* L, int r)
{
if (r < 0)
{
if (lua_isstring(L, -1))
{ // error object is a string?
luaL_where(L, 1); // add extra info
lua_insert(L, -2);
lua_concat(L, 2);
}
lua_error(L); // propagate error
}
return r;
}
static int auxwrapy(lua_State* L)
{
lua_State* co = lua_tothread(L, lua_upvalueindex(1));
int narg = cast_int(L->top - L->base);
int r = auxresume(L, co, narg);
if (r == CO_STATUS_BREAK)
return interruptThread(L, co);
return auxwrapfinish(L, r);
}
static int auxwrapcont(lua_State* L, int status)
{
lua_State* co = lua_tothread(L, lua_upvalueindex(1));
// if coroutine still hasn't yielded after the break, break current thread again
if (co->status == LUA_BREAK)
return interruptThread(L, co);
int r = auxresumecont(L, co);
return auxwrapfinish(L, r);
}
static int cocreate(lua_State* L)
{
luaL_checktype(L, 1, LUA_TFUNCTION);
lua_State* NL = lua_newthread(L);
lua_xpush(L, NL, 1); // push function on top of NL
return 1;
}
static int cowrap(lua_State* L)
{
cocreate(L);
lua_pushcclosurek(L, auxwrapy, NULL, 1, auxwrapcont);
return 1;
}
static int coyield(lua_State* L)
{
int nres = cast_int(L->top - L->base);
return lua_yield(L, nres);
}
static int corunning(lua_State* L)
{
if (lua_pushthread(L))
lua_pushnil(L); // main thread is not a coroutine
return 1;
}
static int coyieldable(lua_State* L)
{
lua_pushboolean(L, lua_isyieldable(L));
return 1;
}
static int coclose(lua_State* L)
{
lua_State* co = lua_tothread(L, 1);
luaL_argexpected(L, co, 1, "thread");
int status = lua_costatus(L, co);
if (status != LUA_COFIN && status != LUA_COERR && status != LUA_COSUS)
luaL_error(L, "cannot close %s coroutine", statnames[status]);
if (co->status == LUA_OK || co->status == LUA_YIELD)
{
lua_pushboolean(L, true);
lua_resetthread(co);
return 1;
}
else
{
lua_pushboolean(L, false);
if (lua_gettop(co))
lua_xmove(co, L, 1); // move error message
lua_resetthread(co);
return 2;
}
}
static const luaL_Reg co_funcs[] = {
{"create", cocreate},
{"running", corunning},
{"status", costatus},
{"wrap", cowrap},
{"yield", coyield},
{"isyieldable", coyieldable},
{"close", coclose},
{NULL, NULL},
};
int luaopen_coroutine(lua_State* L)
{
luaL_register(L, LUA_COLIBNAME, co_funcs);
lua_pushcclosurek(L, coresumey, "resume", 0, coresumecont);
lua_setfield(L, -2, "resume");
return 1;
}

View File

@ -1,166 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lvm.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
static lua_State* getthread(lua_State* L, int* arg)
{
if (lua_isthread(L, 1))
{
*arg = 1;
return lua_tothread(L, 1);
}
else
{
*arg = 0;
return L;
}
}
static int db_info(lua_State* L)
{
int arg;
lua_State* L1 = getthread(L, &arg);
// If L1 != L, L1 can be in any state, and therefore there are no guarantees about its stack space
if (L != L1)
lua_rawcheckstack(L1, 1); // for 'f' option
int level;
if (lua_isnumber(L, arg + 1))
{
level = (int)lua_tointeger(L, arg + 1);
luaL_argcheck(L, level >= 0, arg + 1, "level can't be negative");
}
else if (arg == 0 && lua_isfunction(L, 1))
{
// convert absolute index to relative index
level = -lua_gettop(L);
}
else
luaL_argerror(L, arg + 1, "function or level expected");
const char* options = luaL_checkstring(L, arg + 2);
lua_Debug ar;
if (!lua_getinfo(L1, level, options, &ar))
return 0;
int results = 0;
bool occurs[26] = {};
for (const char* it = options; *it; ++it)
{
if (unsigned(*it - 'a') < 26)
{
if (occurs[*it - 'a'])
luaL_argerror(L, arg + 2, "duplicate option");
occurs[*it - 'a'] = true;
}
switch (*it)
{
case 's':
lua_pushstring(L, ar.short_src);
results++;
break;
case 'l':
lua_pushinteger(L, ar.currentline);
results++;
break;
case 'n':
lua_pushstring(L, ar.name ? ar.name : "");
results++;
break;
case 'f':
if (L1 == L)
lua_pushvalue(L, -1 - results); // function is right before results
else
lua_xmove(L1, L, 1); // function is at top of L1
results++;
break;
case 'a':
lua_pushinteger(L, ar.nparams);
lua_pushboolean(L, ar.isvararg);
results += 2;
break;
default:
luaL_argerror(L, arg + 2, "invalid option");
}
}
return results;
}
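// Illustrative note (not in the original source): options are processed in
// order, so debug.info(1, "sln") returns the caller's source, current line,
// and name, in exactly that order.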
static int db_traceback(lua_State* L)
{
int arg;
lua_State* L1 = getthread(L, &arg);
const char* msg = luaL_optstring(L, arg + 1, NULL);
int level = luaL_optinteger(L, arg + 2, (L == L1) ? 1 : 0);
luaL_argcheck(L, level >= 0, arg + 2, "level can't be negative");
luaL_Buffer buf;
luaL_buffinit(L, &buf);
if (msg)
{
luaL_addstring(&buf, msg);
luaL_addstring(&buf, "\n");
}
lua_Debug ar;
for (int i = level; lua_getinfo(L1, i, "sln", &ar); ++i)
{
if (strcmp(ar.what, "C") == 0)
continue;
if (ar.source)
luaL_addstring(&buf, ar.short_src);
if (ar.currentline > 0)
{
char line[32]; // manual conversion for performance
char* lineend = line + sizeof(line);
char* lineptr = lineend;
for (unsigned int r = ar.currentline; r > 0; r /= 10)
*--lineptr = '0' + (r % 10);
luaL_addchar(&buf, ':');
luaL_addlstring(&buf, lineptr, lineend - lineptr, -1);
}
if (ar.name)
{
luaL_addstring(&buf, " function ");
luaL_addstring(&buf, ar.name);
}
luaL_addchar(&buf, '\n');
}
luaL_pushresult(&buf);
return 1;
}
static const luaL_Reg dblib[] = {
{"info", db_info},
{"traceback", db_traceback},
{NULL, NULL},
};
int luaopen_debug(lua_State* L)
{
luaL_register(L, LUA_DBLIBNAME, dblib);
return 1;
}

View File

@ -1,568 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "ldebug.h"
#include "lapi.h"
#include "lfunc.h"
#include "lmem.h"
#include "lgc.h"
#include "ldo.h"
#include "lbytecode.h"
#include <string.h>
#include <stdio.h>
static const char* getfuncname(Closure* f);
static int currentpc(lua_State* L, CallInfo* ci)
{
return pcRel(ci->savedpc, ci_func(ci)->l.p);
}
static int currentline(lua_State* L, CallInfo* ci)
{
return luaG_getline(ci_func(ci)->l.p, currentpc(L, ci));
}
static Proto* getluaproto(CallInfo* ci)
{
return (isLua(ci) ? cast_to(Proto*, ci_func(ci)->l.p) : NULL);
}
int lua_getargument(lua_State* L, int level, int n)
{
if (unsigned(level) >= unsigned(L->ci - L->base_ci))
return 0;
CallInfo* ci = L->ci - level;
Proto* fp = getluaproto(ci);
int res = 0;
if (fp && n > 0)
{
if (n <= fp->numparams)
{
luaC_threadbarrier(L);
luaA_pushobject(L, ci->base + (n - 1));
res = 1;
}
else if (fp->is_vararg && n < ci->base - ci->func)
{
luaC_threadbarrier(L);
luaA_pushobject(L, ci->func + n);
res = 1;
}
}
return res;
}
const char* lua_getlocal(lua_State* L, int level, int n)
{
if (unsigned(level) >= unsigned(L->ci - L->base_ci))
return 0;
CallInfo* ci = L->ci - level;
Proto* fp = getluaproto(ci);
const LocVar* var = fp ? luaF_getlocal(fp, n, currentpc(L, ci)) : NULL;
if (var)
{
luaC_threadbarrier(L);
luaA_pushobject(L, ci->base + var->reg);
}
const char* name = var ? getstr(var->varname) : NULL;
return name;
}
const char* lua_setlocal(lua_State* L, int level, int n)
{
if (unsigned(level) >= unsigned(L->ci - L->base_ci))
return 0;
CallInfo* ci = L->ci - level;
Proto* fp = getluaproto(ci);
const LocVar* var = fp ? luaF_getlocal(fp, n, currentpc(L, ci)) : NULL;
if (var)
setobj2s(L, ci->base + var->reg, L->top - 1);
L->top--; // pop value
const char* name = var ? getstr(var->varname) : NULL;
return name;
}
static Closure* auxgetinfo(lua_State* L, const char* what, lua_Debug* ar, Closure* f, CallInfo* ci)
{
Closure* cl = NULL;
for (; *what; what++)
{
switch (*what)
{
case 's':
{
if (f->isC)
{
ar->source = "=[C]";
ar->what = "C";
ar->linedefined = -1;
ar->short_src = "[C]";
}
else
{
TString* source = f->l.p->source;
ar->source = getstr(source);
ar->what = "Lua";
ar->linedefined = f->l.p->linedefined;
ar->short_src = luaO_chunkid(ar->ssbuf, sizeof(ar->ssbuf), getstr(source), source->len);
}
break;
}
case 'l':
{
if (ci)
{
ar->currentline = isLua(ci) ? currentline(L, ci) : -1;
}
else
{
ar->currentline = f->isC ? -1 : f->l.p->linedefined;
}
break;
}
case 'u':
{
ar->nupvals = f->nupvalues;
break;
}
case 'a':
{
if (f->isC)
{
ar->isvararg = 1;
ar->nparams = 0;
}
else
{
ar->isvararg = f->l.p->is_vararg;
ar->nparams = f->l.p->numparams;
}
break;
}
case 'n':
{
ar->name = ci ? getfuncname(ci_func(ci)) : getfuncname(f);
break;
}
case 'f':
{
cl = f;
break;
}
default:;
}
}
return cl;
}
int lua_stackdepth(lua_State* L)
{
return int(L->ci - L->base_ci);
}
int lua_getinfo(lua_State* L, int level, const char* what, lua_Debug* ar)
{
Closure* f = NULL;
CallInfo* ci = NULL;
if (level < 0)
{
const TValue* func = luaA_toobject(L, level);
api_check(L, ttisfunction(func));
f = clvalue(func);
}
else if (unsigned(level) < unsigned(L->ci - L->base_ci))
{
ci = L->ci - level;
LUAU_ASSERT(ttisfunction(ci->func));
f = clvalue(ci->func);
}
if (f)
{
// auxgetinfo fills ar and optionally requests to put closure on stack
if (Closure* fcl = auxgetinfo(L, what, ar, f, ci))
{
luaC_threadbarrier(L);
setclvalue(L, L->top, fcl);
incr_top(L);
}
}
return f ? 1 : 0;
}
static const char* getfuncname(Closure* cl)
{
if (cl->isC)
{
if (cl->c.debugname)
{
return cl->c.debugname;
}
}
else
{
Proto* p = cl->l.p;
if (p->debugname)
{
return getstr(p->debugname);
}
}
return nullptr;
}
l_noret luaG_typeerrorL(lua_State* L, const TValue* o, const char* op)
{
const char* t = luaT_objtypename(L, o);
luaG_runerror(L, "attempt to %s a %s value", op, t);
}
l_noret luaG_forerrorL(lua_State* L, const TValue* o, const char* what)
{
const char* t = luaT_objtypename(L, o);
luaG_runerror(L, "invalid 'for' %s (number expected, got %s)", what, t);
}
l_noret luaG_concaterror(lua_State* L, StkId p1, StkId p2)
{
const char* t1 = luaT_objtypename(L, p1);
const char* t2 = luaT_objtypename(L, p2);
luaG_runerror(L, "attempt to concatenate %s with %s", t1, t2);
}
l_noret luaG_aritherror(lua_State* L, const TValue* p1, const TValue* p2, TMS op)
{
const char* t1 = luaT_objtypename(L, p1);
const char* t2 = luaT_objtypename(L, p2);
const char* opname = luaT_eventname[op] + 2; // skip __ from metamethod name
if (t1 == t2)
luaG_runerror(L, "attempt to perform arithmetic (%s) on %s", opname, t1);
else
luaG_runerror(L, "attempt to perform arithmetic (%s) on %s and %s", opname, t1, t2);
}
l_noret luaG_ordererror(lua_State* L, const TValue* p1, const TValue* p2, TMS op)
{
const char* t1 = luaT_objtypename(L, p1);
const char* t2 = luaT_objtypename(L, p2);
const char* opname = (op == TM_LT) ? "<" : (op == TM_LE) ? "<=" : "==";
luaG_runerror(L, "attempt to compare %s %s %s", t1, opname, t2);
}
l_noret luaG_indexerror(lua_State* L, const TValue* p1, const TValue* p2)
{
const char* t1 = luaT_objtypename(L, p1);
const char* t2 = luaT_objtypename(L, p2);
const TString* key = ttisstring(p2) ? tsvalue(p2) : 0;
if (key && key->len <= 64) // limit length to make sure we don't generate very long error messages for very long keys
luaG_runerror(L, "attempt to index %s with '%s'", t1, getstr(key));
else
luaG_runerror(L, "attempt to index %s with %s", t1, t2);
}
l_noret luaG_methoderror(lua_State* L, const TValue* p1, const TString* p2)
{
const char* t1 = luaT_objtypename(L, p1);
luaG_runerror(L, "attempt to call missing method '%s' of %s", getstr(p2), t1);
}
l_noret luaG_readonlyerror(lua_State* L)
{
luaG_runerror(L, "attempt to modify a readonly table");
}
static void pusherror(lua_State* L, const char* msg)
{
CallInfo* ci = L->ci;
if (isLua(ci))
{
TString* source = getluaproto(ci)->source;
char chunkbuf[LUA_IDSIZE]; // add file:line information
const char* chunkid = luaO_chunkid(chunkbuf, sizeof(chunkbuf), getstr(source), source->len);
int line = currentline(L, ci);
luaO_pushfstring(L, "%s:%d: %s", chunkid, line, msg);
}
else
{
lua_pushstring(L, msg);
}
}
l_noret luaG_runerrorL(lua_State* L, const char* fmt, ...)
{
va_list argp;
va_start(argp, fmt);
char result[LUA_BUFFERSIZE];
vsnprintf(result, sizeof(result), fmt, argp);
va_end(argp);
pusherror(L, result);
luaD_throw(L, LUA_ERRRUN);
}
void luaG_pusherror(lua_State* L, const char* error)
{
pusherror(L, error);
}
void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable)
{
if (p->lineinfo)
{
for (int i = 0; i < p->sizecode; ++i)
{
// note: we keep prologue as is, instead opting to break at the first meaningful instruction
if (LUAU_INSN_OP(p->code[i]) == LOP_PREPVARARGS)
continue;
if (luaG_getline(p, i) != line)
continue;
// lazy copy of the original opcode array; done when the first breakpoint is set
if (!p->debuginsn)
{
p->debuginsn = luaM_newarray(L, p->sizecode, uint8_t, p->memcat);
for (int j = 0; j < p->sizecode; ++j)
p->debuginsn[j] = LUAU_INSN_OP(p->code[j]);
}
uint8_t op = enable ? LOP_BREAK : LUAU_INSN_OP(p->debuginsn[i]);
// patch just the opcode byte, leave arguments alone
p->code[i] &= ~0xff;
p->code[i] |= op;
LUAU_ASSERT(LUAU_INSN_OP(p->code[i]) == op);
#if LUA_CUSTOM_EXECUTION
if (L->global->ecb.setbreakpoint)
L->global->ecb.setbreakpoint(L, p, i);
#endif
// note: this is important!
// we only patch the *first* instruction in each proto that's attributed to a given line
// this can be changed, but it requires making patching a bit more nuanced so that we don't patch AUX words
break;
}
}
for (int i = 0; i < p->sizep; ++i)
{
luaG_breakpoint(L, p->p[i], line, enable);
}
}
bool luaG_onbreak(lua_State* L)
{
if (L->ci == L->base_ci)
return false;
if (!isLua(L->ci))
return false;
return LUAU_INSN_OP(*L->ci->savedpc) == LOP_BREAK;
}
int luaG_getline(Proto* p, int pc)
{
LUAU_ASSERT(pc >= 0 && pc < p->sizecode);
if (!p->lineinfo)
return 0;
return p->abslineinfo[pc >> p->linegaplog2] + p->lineinfo[pc];
}
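// Illustrative note (not in the original source): lineinfo stores an 8-bit
// delta per instruction against a baseline that abslineinfo records once
// every 2^linegaplog2 instructions, which is why the two arrays are summed.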
void lua_singlestep(lua_State* L, int enabled)
{
L->singlestep = bool(enabled);
}
static int getmaxline(Proto* p)
{
int result = -1;
for (int i = 0; i < p->sizecode; ++i)
{
int line = luaG_getline(p, i);
result = result < line ? line : result;
}
for (int i = 0; i < p->sizep; ++i)
{
int psize = getmaxline(p->p[i]);
result = result < psize ? psize : result;
}
return result;
}
// Find a line with instructions. If the provided line doesn't have any instructions, return the next
// line number that does.
static int getnextline(Proto* p, int line)
{
int closest = -1;
if (p->lineinfo)
{
for (int i = 0; i < p->sizecode; ++i)
{
// note: we keep prologue as is, instead opting to break at the first meaningful instruction
if (LUAU_INSN_OP(p->code[i]) == LOP_PREPVARARGS)
continue;
int candidate = luaG_getline(p, i);
if (candidate == line)
return line;
if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}
}
for (int i = 0; i < p->sizep; ++i)
{
// Find the closest line number to the intended one.
int candidate = getnextline(p->p[i], line);
if (candidate == line)
return line;
if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}
return closest;
}
int lua_breakpoint(lua_State* L, int funcindex, int line, int enabled)
{
const TValue* func = luaA_toobject(L, funcindex);
api_check(L, ttisfunction(func) && !clvalue(func)->isC);
Proto* p = clvalue(func)->l.p;
// Find line number to add the breakpoint to.
int target = getnextline(p, line);
if (target != -1)
{
// Add breakpoint on the exact line
luaG_breakpoint(L, p, target, bool(enabled));
}
return target;
}
static void getcoverage(Proto* p, int depth, int* buffer, size_t size, void* context, lua_Coverage callback)
{
memset(buffer, -1, size * sizeof(int));
for (int i = 0; i < p->sizecode; ++i)
{
Instruction insn = p->code[i];
if (LUAU_INSN_OP(insn) != LOP_COVERAGE)
continue;
int line = luaG_getline(p, i);
int hits = LUAU_INSN_E(insn);
LUAU_ASSERT(size_t(line) < size);
buffer[line] = buffer[line] < hits ? hits : buffer[line];
}
const char* debugname = p->debugname ? getstr(p->debugname) : NULL;
int linedefined = p->linedefined;
callback(context, debugname, linedefined, depth, buffer, size);
for (int i = 0; i < p->sizep; ++i)
getcoverage(p->p[i], depth + 1, buffer, size, context, callback);
}
void lua_getcoverage(lua_State* L, int funcindex, void* context, lua_Coverage callback)
{
const TValue* func = luaA_toobject(L, funcindex);
api_check(L, ttisfunction(func) && !clvalue(func)->isC);
Proto* p = clvalue(func)->l.p;
size_t size = getmaxline(p) + 1;
if (size == 0)
return;
int* buffer = luaM_newarray(L, size, int, 0);
getcoverage(p, 0, buffer, size, context, callback);
luaM_freearray(L, buffer, size, int, 0);
}
static size_t append(char* buf, size_t bufsize, size_t offset, const char* data)
{
size_t size = strlen(data);
size_t copy = offset + size >= bufsize ? bufsize - offset - 1 : size;
memcpy(buf + offset, data, copy);
return offset + copy;
}
const char* lua_debugtrace(lua_State* L)
{
static char buf[4096];
const int limit1 = 10;
const int limit2 = 10;
int depth = int(L->ci - L->base_ci);
size_t offset = 0;
lua_Debug ar;
for (int level = 0; lua_getinfo(L, level, "sln", &ar); ++level)
{
if (ar.source)
offset = append(buf, sizeof(buf), offset, ar.short_src);
if (ar.currentline > 0)
{
char line[32];
snprintf(line, sizeof(line), ":%d", ar.currentline);
offset = append(buf, sizeof(buf), offset, line);
}
if (ar.name)
{
offset = append(buf, sizeof(buf), offset, " function ");
offset = append(buf, sizeof(buf), offset, ar.name);
}
offset = append(buf, sizeof(buf), offset, "\n");
if (depth > limit1 + limit2 && level == limit1 - 1)
{
char skip[32];
snprintf(skip, sizeof(skip), "... (+%d frames)\n", int(depth - limit1 - limit2));
offset = append(buf, sizeof(buf), offset, skip);
level = depth - limit2 - 1;
}
}
LUAU_ASSERT(offset < sizeof(buf));
buf[offset] = '\0';
return buf;
}

View File

@ -1,31 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lstate.h"
#define pcRel(pc, p) ((pc) ? cast_to(int, (pc) - (p)->code) - 1 : 0)
#define luaG_typeerror(L, o, opname) luaG_typeerrorL(L, o, opname)
#define luaG_forerror(L, o, what) luaG_forerrorL(L, o, what)
#define luaG_runerror(L, fmt, ...) luaG_runerrorL(L, fmt, ##__VA_ARGS__)
#define LUA_MEMERRMSG "not enough memory"
#define LUA_ERRERRMSG "error in error handling"
LUAI_FUNC l_noret luaG_typeerrorL(lua_State* L, const TValue* o, const char* opname);
LUAI_FUNC l_noret luaG_forerrorL(lua_State* L, const TValue* o, const char* what);
LUAI_FUNC l_noret luaG_concaterror(lua_State* L, StkId p1, StkId p2);
LUAI_FUNC l_noret luaG_aritherror(lua_State* L, const TValue* p1, const TValue* p2, TMS op);
LUAI_FUNC l_noret luaG_ordererror(lua_State* L, const TValue* p1, const TValue* p2, TMS op);
LUAI_FUNC l_noret luaG_indexerror(lua_State* L, const TValue* p1, const TValue* p2);
LUAI_FUNC l_noret luaG_methoderror(lua_State* L, const TValue* p1, const TString* p2);
LUAI_FUNC l_noret luaG_readonlyerror(lua_State* L);
LUAI_FUNC LUA_PRINTF_ATTR(2, 3) l_noret luaG_runerrorL(lua_State* L, const char* fmt, ...);
LUAI_FUNC void luaG_pusherror(lua_State* L, const char* error);
LUAI_FUNC void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable);
LUAI_FUNC bool luaG_onbreak(lua_State* L);
LUAI_FUNC int luaG_getline(Proto* p, int pc);

View File

@ -1,600 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "ldo.h"
#include "lstring.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lvm.h"
#if LUA_USE_LONGJMP
#include <setjmp.h>
#include <stdlib.h>
#else
#include <stdexcept>
#endif
#include <string.h>
/*
** {======================================================
** Error-recovery functions
** =======================================================
*/
#if LUA_USE_LONGJMP
struct lua_jmpbuf
{
lua_jmpbuf* volatile prev;
volatile int status;
jmp_buf buf;
};
// use POSIX versions of setjmp/longjmp if possible: they don't save/restore signal mask and are therefore faster
#if defined(__linux__) || defined(__APPLE__)
#define LUAU_SETJMP(buf) _setjmp(buf)
#define LUAU_LONGJMP(buf, code) _longjmp(buf, code)
#else
#define LUAU_SETJMP(buf) setjmp(buf)
#define LUAU_LONGJMP(buf, code) longjmp(buf, code)
#endif
int luaD_rawrunprotected(lua_State* L, Pfunc f, void* ud)
{
lua_jmpbuf jb;
jb.prev = L->global->errorjmp;
jb.status = 0;
L->global->errorjmp = &jb;
if (LUAU_SETJMP(jb.buf) == 0)
f(L, ud);
L->global->errorjmp = jb.prev;
return jb.status;
}
l_noret luaD_throw(lua_State* L, int errcode)
{
if (lua_jmpbuf* jb = L->global->errorjmp)
{
jb->status = errcode;
LUAU_LONGJMP(jb->buf, 1);
}
if (L->global->cb.panic)
L->global->cb.panic(L, errcode);
abort();
}
#else
class lua_exception : public std::exception
{
public:
lua_exception(lua_State* L, int status)
: L(L)
, status(status)
{
}
const char* what() const throw() override
{
// LUA_ERRRUN passes error object on the stack
if (status == LUA_ERRRUN)
if (const char* str = lua_tostring(L, -1))
return str;
switch (status)
{
case LUA_ERRRUN:
return "lua_exception: runtime error";
case LUA_ERRSYNTAX:
return "lua_exception: syntax error";
case LUA_ERRMEM:
return "lua_exception: " LUA_MEMERRMSG;
case LUA_ERRERR:
return "lua_exception: " LUA_ERRERRMSG;
default:
return "lua_exception: unexpected exception status";
}
}
int getStatus() const
{
return status;
}
private:
lua_State* L;
int status;
};
int luaD_rawrunprotected(lua_State* L, Pfunc f, void* ud)
{
int status = 0;
try
{
f(L, ud);
return 0;
}
catch (lua_exception& e)
{
// lua_exception means that luaD_throw was called and an exception object is on stack if status is ERRRUN
status = e.getStatus();
}
catch (std::exception& e)
{
// Luau will never throw this, but this can catch exceptions that escape from C++ implementations of external functions
try
{
// there's no exception object on stack; let's push the error on stack so that error handling below can proceed
luaG_pusherror(L, e.what());
status = LUA_ERRRUN;
}
catch (std::exception&)
{
// out of memory while allocating error string
status = LUA_ERRMEM;
}
}
return status;
}
l_noret luaD_throw(lua_State* L, int errcode)
{
throw lua_exception(L, errcode);
}
#endif
// }======================================================
static void correctstack(lua_State* L, TValue* oldstack)
{
L->top = (L->top - oldstack) + L->stack;
for (UpVal* up = L->openupval; up != NULL; up = up->u.open.threadnext)
up->v = (up->v - oldstack) + L->stack;
for (CallInfo* ci = L->base_ci; ci <= L->ci; ci++)
{
ci->top = (ci->top - oldstack) + L->stack;
ci->base = (ci->base - oldstack) + L->stack;
ci->func = (ci->func - oldstack) + L->stack;
}
L->base = (L->base - oldstack) + L->stack;
}
void luaD_reallocstack(lua_State* L, int newsize)
{
TValue* oldstack = L->stack;
int realsize = newsize + EXTRA_STACK;
LUAU_ASSERT(L->stack_last - L->stack == L->stacksize - EXTRA_STACK);
luaM_reallocarray(L, L->stack, L->stacksize, realsize, TValue, L->memcat);
TValue* newstack = L->stack;
for (int i = L->stacksize; i < realsize; i++)
setnilvalue(newstack + i); // erase new segment
L->stacksize = realsize;
L->stack_last = newstack + newsize;
correctstack(L, oldstack);
}
void luaD_reallocCI(lua_State* L, int newsize)
{
CallInfo* oldci = L->base_ci;
luaM_reallocarray(L, L->base_ci, L->size_ci, newsize, CallInfo, L->memcat);
L->size_ci = newsize;
L->ci = (L->ci - oldci) + L->base_ci;
L->end_ci = L->base_ci + L->size_ci - 1;
}
void luaD_growstack(lua_State* L, int n)
{
if (n <= L->stacksize) // double size is enough?
luaD_reallocstack(L, 2 * L->stacksize);
else
luaD_reallocstack(L, L->stacksize + n);
}
CallInfo* luaD_growCI(lua_State* L)
{
// allow extra stack space to handle stack overflow in xpcall
const int hardlimit = LUAI_MAXCALLS + (LUAI_MAXCALLS >> 3);
if (L->size_ci >= hardlimit)
luaD_throw(L, LUA_ERRERR); // error while handling stack error
int request = L->size_ci * 2;
luaD_reallocCI(L, L->size_ci >= LUAI_MAXCALLS ? hardlimit : request < LUAI_MAXCALLS ? request : LUAI_MAXCALLS);
if (L->size_ci > LUAI_MAXCALLS)
luaG_runerror(L, "stack overflow");
return ++L->ci;
}
void luaD_checkCstack(lua_State* L)
{
// allow extra stack space to handle stack overflow in xpcall
const int hardlimit = LUAI_MAXCCALLS + (LUAI_MAXCCALLS >> 3);
if (L->nCcalls == LUAI_MAXCCALLS)
luaG_runerror(L, "C stack overflow");
else if (L->nCcalls >= hardlimit)
luaD_throw(L, LUA_ERRERR); // error while handling stack error
}
/*
** Call a function (C or Lua). The function to be called is at *func.
** The arguments are on the stack, right after the function.
** When returns, all the results are on the stack, starting at the original
** function position.
*/
void luaD_call(lua_State* L, StkId func, int nresults)
{
if (++L->nCcalls >= LUAI_MAXCCALLS)
luaD_checkCstack(L);
ptrdiff_t old_func = savestack(L, func);
if (luau_precall(L, func, nresults) == PCRLUA)
{ // is a Lua function?
L->ci->flags |= LUA_CALLINFO_RETURN; // luau_execute will stop after returning from the stack frame
bool oldactive = L->isactive;
L->isactive = true;
luaC_threadbarrier(L);
luau_execute(L); // call it
if (!oldactive)
L->isactive = false;
}
if (nresults != LUA_MULTRET)
L->top = restorestack(L, old_func) + nresults;
L->nCcalls--;
luaC_checkGC(L);
}
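// Illustrative note (not in the original source): when nresults is not
// LUA_MULTRET, the top is pinned to func + nresults above, so callers get a
// fixed number of stack slots back regardless of what the callee returned.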
static void seterrorobj(lua_State* L, int errcode, StkId oldtop)
{
switch (errcode)
{
case LUA_ERRMEM:
{
setsvalue(L, oldtop, luaS_newliteral(L, LUA_MEMERRMSG)); // cannot fail because the string is pinned in luaopen
break;
}
case LUA_ERRERR:
{
setsvalue(L, oldtop, luaS_newliteral(L, LUA_ERRERRMSG)); // cannot fail because the string is pinned in luaopen
break;
}
case LUA_ERRSYNTAX:
case LUA_ERRRUN:
{
setobj2s(L, oldtop, L->top - 1); // error message on current top
break;
}
}
L->top = oldtop + 1;
}
static void resume_continue(lua_State* L)
{
// unroll Lua/C combined stack, processing continuations
while (L->status == 0 && L->ci > L->base_ci)
{
LUAU_ASSERT(L->baseCcalls == L->nCcalls);
Closure* cl = curr_func(L);
if (cl->isC)
{
LUAU_ASSERT(cl->c.cont);
// C continuation; we expect this to be followed by Lua continuations
int n = cl->c.cont(L, 0);
// Continuation can break again
if (L->status == LUA_BREAK)
break;
luau_poscall(L, L->top - n);
}
else
{
// Lua continuation; it terminates at the end of the stack or at another C continuation
luau_execute(L);
}
}
}
static void resume(lua_State* L, void* ud)
{
StkId firstArg = cast_to(StkId, ud);
if (L->status == 0)
{
// start coroutine
LUAU_ASSERT(L->ci == L->base_ci && firstArg >= L->base);
if (firstArg == L->base)
luaG_runerror(L, "cannot resume dead coroutine");
if (luau_precall(L, firstArg - 1, LUA_MULTRET) != PCRLUA)
return;
L->ci->flags |= LUA_CALLINFO_RETURN;
}
else
{
// resume from previous yield or break
LUAU_ASSERT(L->status == LUA_YIELD || L->status == LUA_BREAK);
L->status = 0;
Closure* cl = curr_func(L);
if (cl->isC)
{
// if the top stack frame is a C call continuation, resume_continue will handle that case
if (!cl->c.cont)
{
// finish interrupted execution of `OP_CALL'
luau_poscall(L, firstArg);
}
}
else
{
// yielded inside a hook: just continue its execution
L->base = L->ci->base;
}
}
// run continuations from the stack; typically resumes Lua code and pcalls
resume_continue(L);
}
static CallInfo* resume_findhandler(lua_State* L)
{
CallInfo* ci = L->ci;
while (ci > L->base_ci)
{
if (ci->flags & LUA_CALLINFO_HANDLE)
return ci;
ci--;
}
return NULL;
}
static void resume_handle(lua_State* L, void* ud)
{
CallInfo* ci = (CallInfo*)ud;
Closure* cl = ci_func(ci);
LUAU_ASSERT(ci->flags & LUA_CALLINFO_HANDLE);
LUAU_ASSERT(cl->isC && cl->c.cont);
LUAU_ASSERT(L->status != 0);
// restore nCcalls back to base since this might not have happened during error handling
L->nCcalls = L->baseCcalls;
// make sure we don't run the handler the second time
ci->flags &= ~LUA_CALLINFO_HANDLE;
// restore thread status to 0 since we're handling the error
int status = L->status;
L->status = 0;
// push error object to stack top if it's not already there
if (status != LUA_ERRRUN)
seterrorobj(L, status, L->top);
// adjust the stack frame for ci to prepare for cont call
L->base = ci->base;
ci->top = L->top;
// save ci pointer - it will be invalidated by cont call!
ptrdiff_t old_ci = saveci(L, ci);
// handle the error in continuation; note that this executes on top of original stack!
int n = cl->c.cont(L, status);
// restore the stack frame to the frame with continuation
L->ci = restoreci(L, old_ci);
// close eventual pending closures; this means it's now safe to restore stack
luaF_close(L, L->base);
// finish cont call and restore stack to previous ci top
luau_poscall(L, L->top - n);
// run remaining continuations from the stack; typically resumes pcalls
resume_continue(L);
}
static int resume_error(lua_State* L, const char* msg)
{
L->top = L->ci->base;
setsvalue(L, L->top, luaS_new(L, msg));
incr_top(L);
return LUA_ERRRUN;
}
static void resume_finish(lua_State* L, int status)
{
L->nCcalls = L->baseCcalls;
L->isactive = false;
if (status != 0)
{ // error?
L->status = cast_byte(status); // mark thread as `dead'
seterrorobj(L, status, L->top);
L->ci->top = L->top;
}
else if (L->status == 0)
{
expandstacklimit(L, L->top);
}
}
int lua_resume(lua_State* L, lua_State* from, int nargs)
{
int status;
if (L->status != LUA_YIELD && L->status != LUA_BREAK && (L->status != 0 || L->ci != L->base_ci))
return resume_error(L, "cannot resume non-suspended coroutine");
L->nCcalls = from ? from->nCcalls : 0;
if (L->nCcalls >= LUAI_MAXCCALLS)
return resume_error(L, "C stack overflow");
L->baseCcalls = ++L->nCcalls;
L->isactive = true;
luaC_threadbarrier(L);
status = luaD_rawrunprotected(L, resume, L->top - nargs);
CallInfo* ch = NULL;
while (status != 0 && (ch = resume_findhandler(L)) != NULL)
{
L->status = cast_byte(status);
status = luaD_rawrunprotected(L, resume_handle, ch);
}
resume_finish(L, status);
--L->nCcalls;
return L->status;
}
int lua_resumeerror(lua_State* L, lua_State* from)
{
int status;
if (L->status != LUA_YIELD && L->status != LUA_BREAK && (L->status != 0 || L->ci != L->base_ci))
return resume_error(L, "cannot resume non-suspended coroutine");
L->nCcalls = from ? from->nCcalls : 0;
if (L->nCcalls >= LUAI_MAXCCALLS)
return resume_error(L, "C stack overflow");
L->baseCcalls = ++L->nCcalls;
L->isactive = true;
luaC_threadbarrier(L);
status = LUA_ERRRUN;
CallInfo* ch = NULL;
while (status != 0 && (ch = resume_findhandler(L)) != NULL)
{
L->status = cast_byte(status);
status = luaD_rawrunprotected(L, resume_handle, ch);
}
resume_finish(L, status);
--L->nCcalls;
return L->status;
}
int lua_yield(lua_State* L, int nresults)
{
if (L->nCcalls > L->baseCcalls)
luaG_runerror(L, "attempt to yield across metamethod/C-call boundary");
L->base = L->top - nresults; // protect stack slots below
L->status = LUA_YIELD;
return -1;
}
int lua_break(lua_State* L)
{
if (L->nCcalls > L->baseCcalls)
luaG_runerror(L, "attempt to break across metamethod/C-call boundary");
L->status = LUA_BREAK;
return -1;
}
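// Illustrative sketch (not part of the original file): a C function yields by
// returning the -1 marker produced by lua_yield, and lua_resume restarts the
// thread after the interrupted call; 'step' is a made-up name.
static int step(lua_State* L)
{
    lua_pushnumber(L, 1);
    return lua_yield(L, 1); // suspends the thread with one result
}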
int lua_isyieldable(lua_State* L)
{
return (L->nCcalls <= L->baseCcalls);
}
static void callerrfunc(lua_State* L, void* ud)
{
StkId errfunc = cast_to(StkId, ud);
setobj2s(L, L->top, L->top - 1);
setobj2s(L, L->top - 1, errfunc);
incr_top(L);
luaD_call(L, L->top - 2, 1);
}
static void restore_stack_limit(lua_State* L)
{
LUAU_ASSERT(L->stack_last - L->stack == L->stacksize - EXTRA_STACK);
if (L->size_ci > LUAI_MAXCALLS)
{ // there was an overflow?
int inuse = cast_int(L->ci - L->base_ci);
if (inuse + 1 < LUAI_MAXCALLS) // can `undo' overflow?
luaD_reallocCI(L, LUAI_MAXCALLS);
}
}
int luaD_pcall(lua_State* L, Pfunc func, void* u, ptrdiff_t old_top, ptrdiff_t ef)
{
unsigned short oldnCcalls = L->nCcalls;
ptrdiff_t old_ci = saveci(L, L->ci);
bool oldactive = L->isactive;
int status = luaD_rawrunprotected(L, func, u);
if (status != 0)
{
int errstatus = status;
// call user-defined error function (used in xpcall)
if (ef)
{
// push error object to stack top if it's not already there
if (status != LUA_ERRRUN)
seterrorobj(L, status, L->top);
// if errfunc fails, we fail with "error in error handling" or "not enough memory"
int err = luaD_rawrunprotected(L, callerrfunc, restorestack(L, ef));
// in general we preserve the status, except for cases when the error handler fails
// out of memory is treated specially because it's common for it to be cascading, in which case we preserve the code
if (err == 0)
errstatus = LUA_ERRRUN;
else if (status == LUA_ERRMEM && err == LUA_ERRMEM)
errstatus = LUA_ERRMEM;
else
errstatus = status = LUA_ERRERR;
}
// since the call failed with an error, we might have to reset the 'active' thread state
if (!oldactive)
L->isactive = false;
// restore nCcalls before calling the debugprotectederror callback, which may rely on the value having been restored
L->nCcalls = oldnCcalls;
// an error occurred, check if we have a protected error callback
if (L->global->cb.debugprotectederror)
{
L->global->cb.debugprotectederror(L);
// debug hook is only allowed to break
if (L->status == LUA_BREAK)
return 0;
}
StkId oldtop = restorestack(L, old_top);
luaF_close(L, oldtop); // close eventual pending closures
seterrorobj(L, errstatus, oldtop);
L->ci = restoreci(L, old_ci);
L->base = L->ci->base;
restore_stack_limit(L);
}
return status;
}

View File

@ -1,55 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
#include "lstate.h"
#include "luaconf.h"
#include "ldebug.h"
#define luaD_checkstack(L, n) \
if ((char*)L->stack_last - (char*)L->top <= (n) * (int)sizeof(TValue)) \
luaD_growstack(L, n); \
else \
condhardstacktests(luaD_reallocstack(L, L->stacksize - EXTRA_STACK));
#define incr_top(L) \
{ \
luaD_checkstack(L, 1); \
L->top++; \
}
#define savestack(L, p) ((char*)(p) - (char*)L->stack)
#define restorestack(L, n) ((TValue*)((char*)L->stack + (n)))
#define expandstacklimit(L, p) \
{ \
LUAU_ASSERT((p) <= (L)->stack_last); \
if ((L)->ci->top < (p)) \
(L)->ci->top = (p); \
}
#define incr_ci(L) ((L->ci == L->end_ci) ? luaD_growCI(L) : (condhardstacktests(luaD_reallocCI(L, L->size_ci)), ++L->ci))
#define saveci(L, p) ((char*)(p) - (char*)L->base_ci)
#define restoreci(L, n) ((CallInfo*)((char*)L->base_ci + (n)))
// results from luaD_precall
#define PCRLUA 0 // initiated a call to a Lua function
#define PCRC 1 // did a call to a C function
#define PCRYIELD 2 // C function yielded
// type of protected functions, to be run by `runprotected'
typedef void (*Pfunc)(lua_State* L, void* ud);
LUAI_FUNC CallInfo* luaD_growCI(lua_State* L);
LUAI_FUNC void luaD_call(lua_State* L, StkId func, int nresults);
LUAI_FUNC int luaD_pcall(lua_State* L, Pfunc func, void* u, ptrdiff_t oldtop, ptrdiff_t ef);
LUAI_FUNC void luaD_reallocCI(lua_State* L, int newsize);
LUAI_FUNC void luaD_reallocstack(lua_State* L, int newsize);
LUAI_FUNC void luaD_growstack(lua_State* L, int n);
LUAI_FUNC void luaD_checkCstack(lua_State* L);
LUAI_FUNC l_noret luaD_throw(lua_State* L, int errcode);
LUAI_FUNC int luaD_rawrunprotected(lua_State* L, Pfunc f, void* ud);

View File

@ -1,196 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lfunc.h"
#include "lstate.h"
#include "lmem.h"
#include "lgc.h"
Proto* luaF_newproto(lua_State* L)
{
Proto* f = luaM_newgco(L, Proto, sizeof(Proto), L->activememcat);
luaC_init(L, f, LUA_TPROTO);
f->k = NULL;
f->sizek = 0;
f->p = NULL;
f->sizep = 0;
f->code = NULL;
f->sizecode = 0;
f->sizeupvalues = 0;
f->nups = 0;
f->upvalues = NULL;
f->numparams = 0;
f->is_vararg = 0;
f->maxstacksize = 0;
f->sizelineinfo = 0;
f->linegaplog2 = 0;
f->lineinfo = NULL;
f->abslineinfo = NULL;
f->sizelocvars = 0;
f->locvars = NULL;
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;
f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;
return f;
}
Closure* luaF_newLclosure(lua_State* L, int nelems, Table* e, Proto* p)
{
Closure* c = luaM_newgco(L, Closure, sizeLclosure(nelems), L->activememcat);
luaC_init(L, c, LUA_TFUNCTION);
c->isC = 0;
c->env = e;
c->nupvalues = cast_byte(nelems);
c->stacksize = p->maxstacksize;
c->preload = 0;
c->l.p = p;
for (int i = 0; i < nelems; ++i)
setnilvalue(&c->l.uprefs[i]);
return c;
}
Closure* luaF_newCclosure(lua_State* L, int nelems, Table* e)
{
Closure* c = luaM_newgco(L, Closure, sizeCclosure(nelems), L->activememcat);
luaC_init(L, c, LUA_TFUNCTION);
c->isC = 1;
c->env = e;
c->nupvalues = cast_byte(nelems);
c->stacksize = LUA_MINSTACK;
c->preload = 0;
c->c.f = NULL;
c->c.cont = NULL;
c->c.debugname = NULL;
return c;
}
UpVal* luaF_findupval(lua_State* L, StkId level)
{
global_State* g = L->global;
UpVal** pp = &L->openupval;
UpVal* p;
while (*pp != NULL && (p = *pp)->v >= level)
{
LUAU_ASSERT(!isdead(g, obj2gco(p)));
LUAU_ASSERT(upisopen(p));
if (p->v == level)
return p;
pp = &p->u.open.threadnext;
}
LUAU_ASSERT(L->isactive);
LUAU_ASSERT(!isblack(obj2gco(L))); // we don't use luaC_threadbarrier because active threads never turn black
UpVal* uv = luaM_newgco(L, UpVal, sizeof(UpVal), L->activememcat); // not found: create a new one
luaC_init(L, uv, LUA_TUPVAL);
uv->markedopen = 0;
uv->v = level; // current value lives in the stack
// chain the upvalue into the thread's open upvalue list at the proper position
uv->u.open.threadnext = *pp;
*pp = uv;
// double link the upvalue in the global open upvalue list
uv->u.open.prev = &g->uvhead;
uv->u.open.next = g->uvhead.u.open.next;
uv->u.open.next->u.open.prev = uv;
g->uvhead.u.open.next = uv;
LUAU_ASSERT(uv->u.open.next->u.open.prev == uv && uv->u.open.prev->u.open.next == uv);
return uv;
}
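// Illustrative note (not in the original source): the per-thread list is kept
// sorted by stack address, highest first, which is what lets luaF_close below
// walk from the head and stop at the first upvalue under 'level'.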
void luaF_freeupval(lua_State* L, UpVal* uv, lua_Page* page)
{
luaM_freegco(L, uv, sizeof(UpVal), uv->memcat, page); // free upvalue
}
void luaF_close(lua_State* L, StkId level)
{
global_State* g = L->global;
UpVal* uv;
while (L->openupval != NULL && (uv = L->openupval)->v >= level)
{
GCObject* o = obj2gco(uv);
LUAU_ASSERT(!isblack(o) && upisopen(uv));
LUAU_ASSERT(!isdead(g, o));
// unlink value *before* closing it since value storage overlaps
L->openupval = uv->u.open.threadnext;
luaF_closeupval(L, uv, /* dead= */ false);
}
}
void luaF_closeupval(lua_State* L, UpVal* uv, bool dead)
{
// unlink value from all lists *before* closing it since value storage overlaps
LUAU_ASSERT(uv->u.open.next->u.open.prev == uv && uv->u.open.prev->u.open.next == uv);
uv->u.open.next->u.open.prev = uv->u.open.prev;
uv->u.open.prev->u.open.next = uv->u.open.next;
if (dead)
return;
setobj(L, &uv->u.value, uv->v);
uv->v = &uv->u.value;
luaC_upvalclosed(L, uv);
}
void luaF_freeproto(lua_State* L, Proto* f, lua_Page* page)
{
luaM_freearray(L, f->code, f->sizecode, Instruction, f->memcat);
luaM_freearray(L, f->p, f->sizep, Proto*, f->memcat);
luaM_freearray(L, f->k, f->sizek, TValue, f->memcat);
if (f->lineinfo)
luaM_freearray(L, f->lineinfo, f->sizelineinfo, uint8_t, f->memcat);
luaM_freearray(L, f->locvars, f->sizelocvars, struct LocVar, f->memcat);
luaM_freearray(L, f->upvalues, f->sizeupvalues, TString*, f->memcat);
if (f->debuginsn)
luaM_freearray(L, f->debuginsn, f->sizecode, uint8_t, f->memcat);
#if LUA_CUSTOM_EXECUTION
if (f->execdata)
{
LUAU_ASSERT(L->global->ecb.destroy);
L->global->ecb.destroy(L, f);
}
#endif
luaM_freegco(L, f, sizeof(Proto), f->memcat, page);
}
void luaF_freeclosure(lua_State* L, Closure* c, lua_Page* page)
{
int size = c->isC ? sizeCclosure(c->nupvalues) : sizeLclosure(c->nupvalues);
luaM_freegco(L, c, size, c->memcat, page);
}
const LocVar* luaF_getlocal(const Proto* f, int local_number, int pc)
{
for (int i = 0; i < f->sizelocvars; i++)
{
if (pc >= f->locvars[i].startpc && pc < f->locvars[i].endpc)
{ // is variable active?
local_number--;
if (local_number == 0)
return &f->locvars[i];
}
}
return NULL; // not found
}
const LocVar* luaF_findlocal(const Proto* f, int local_reg, int pc)
{
for (int i = 0; i < f->sizelocvars; i++)
if (local_reg == f->locvars[i].reg && pc >= f->locvars[i].startpc && pc < f->locvars[i].endpc)
return &f->locvars[i];
return NULL; // not found
}
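
A short sketch (editor's note, not from the sources) of the upvalue lifecycle the functions above implement: while open, an UpVal aliases a slot in its owning thread's stack and is linked into two lists; closing migrates the value into the UpVal itself so the stack slot can be reused.
// open:   uv->v points into the thread's stack; upisopen(uv) is true;
//         luaF_findupval(L, slot) reuses an existing UpVal for the slot or
//         creates one, keeping L->openupval sorted by descending stack address
// close:  luaF_close(L, level) closes every upvalue at or above 'level';
//         afterwards uv->v == &uv->u.value and the stack slot is free to reuse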

View File

@ -1,20 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
#define sizeCclosure(n) (offsetof(Closure, c.upvals) + sizeof(TValue) * (n))
#define sizeLclosure(n) (offsetof(Closure, l.uprefs) + sizeof(TValue) * (n))
LUAI_FUNC Proto* luaF_newproto(lua_State* L);
LUAI_FUNC Closure* luaF_newLclosure(lua_State* L, int nelems, Table* e, Proto* p);
LUAI_FUNC Closure* luaF_newCclosure(lua_State* L, int nelems, Table* e);
LUAI_FUNC UpVal* luaF_findupval(lua_State* L, StkId level);
LUAI_FUNC void luaF_close(lua_State* L, StkId level);
LUAI_FUNC void luaF_closeupval(lua_State* L, UpVal* uv, bool dead);
LUAI_FUNC void luaF_freeproto(lua_State* L, Proto* f, struct lua_Page* page);
LUAI_FUNC void luaF_freeclosure(lua_State* L, Closure* c, struct lua_Page* page);
LUAI_FUNC void luaF_freeupval(lua_State* L, UpVal* uv, struct lua_Page* page);
LUAI_FUNC const LocVar* luaF_getlocal(const Proto* func, int local_number, int pc);
LUAI_FUNC const LocVar* luaF_findlocal(const Proto* func, int local_reg, int pc);
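
The sizeCclosure/sizeLclosure macros above describe a single-allocation layout: the Closure header plus an inline array of upvalue slots. A hedged example of what that implies:
// e.g. a Lua closure with 3 upvalues occupies one GCO block of
//   offsetof(Closure, l.uprefs) + 3 * sizeof(TValue)
// bytes; C closures store TValue upvalues in c.upvals instead, so the two
// variants differ in header layout but are always allocated and freed whole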

File diff suppressed because it is too large Load Diff

View File

@ -1,138 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "ldo.h"
#include "lobject.h"
#include "lstate.h"
/*
** Default settings for GC tunables (settable via lua_gc)
*/
#define LUAI_GCGOAL 200 // 200% (allow heap to double compared to live heap size)
#define LUAI_GCSTEPMUL 200 // GC runs 'twice the speed' of memory allocation
#define LUAI_GCSTEPSIZE 1 // GC runs every KB of memory allocation
/*
** Possible states of the Garbage Collector
*/
#define GCSpause 0
#define GCSpropagate 1
#define GCSpropagateagain 2
#define GCSatomic 3
#define GCSsweep 4
/*
** macro to tell when main invariant (white objects cannot point to black
** ones) must be kept. During a collection, the sweep
** phase may break the invariant, as objects turned white may point to
** still-black objects. The invariant is restored when sweep ends and
** all objects are white again.
*/
#define keepinvariant(g) ((g)->gcstate == GCSpropagate || (g)->gcstate == GCSpropagateagain || (g)->gcstate == GCSatomic)
/*
** some useful bit tricks
*/
#define resetbits(x, m) ((x) &= cast_to(uint8_t, ~(m)))
#define setbits(x, m) ((x) |= (m))
#define testbits(x, m) ((x) & (m))
#define bitmask(b) (1 << (b))
#define bit2mask(b1, b2) (bitmask(b1) | bitmask(b2))
#define l_setbit(x, b) setbits(x, bitmask(b))
#define resetbit(x, b) resetbits(x, bitmask(b))
#define testbit(x, b) testbits(x, bitmask(b))
#define set2bits(x, b1, b2) setbits(x, (bit2mask(b1, b2)))
#define reset2bits(x, b1, b2) resetbits(x, (bit2mask(b1, b2)))
#define test2bits(x, b1, b2) testbits(x, (bit2mask(b1, b2)))
/*
** Layout for bit use in `marked' field:
** bit 0 - object is white (type 0)
** bit 1 - object is white (type 1)
** bit 2 - object is black
** bit 3 - object is fixed (should not be collected)
*/
#define WHITE0BIT 0
#define WHITE1BIT 1
#define BLACKBIT 2
#define FIXEDBIT 3
#define WHITEBITS bit2mask(WHITE0BIT, WHITE1BIT)
#define iswhite(x) test2bits((x)->gch.marked, WHITE0BIT, WHITE1BIT)
#define isblack(x) testbit((x)->gch.marked, BLACKBIT)
#define isgray(x) (!testbits((x)->gch.marked, WHITEBITS | bitmask(BLACKBIT)))
#define isfixed(x) testbit((x)->gch.marked, FIXEDBIT)
#define otherwhite(g) (g->currentwhite ^ WHITEBITS)
#define isdead(g, v) (((v)->gch.marked & (WHITEBITS | bitmask(FIXEDBIT))) == (otherwhite(g) & WHITEBITS))
#define changewhite(x) ((x)->gch.marked ^= WHITEBITS)
#define gray2black(x) l_setbit((x)->gch.marked, BLACKBIT)
#define luaC_white(g) cast_to(uint8_t, ((g)->currentwhite) & WHITEBITS)
#define luaC_checkGC(L) \
{ \
condhardstacktests(luaD_reallocstack(L, L->stacksize - EXTRA_STACK)); \
if (L->global->totalbytes >= L->global->GCthreshold) \
{ \
condhardmemtests(luaC_validate(L), 1); \
luaC_step(L, true); \
} \
else \
{ \
condhardmemtests(luaC_validate(L), 2); \
} \
}
#define luaC_barrier(L, p, v) \
{ \
if (iscollectable(v) && isblack(obj2gco(p)) && iswhite(gcvalue(v))) \
luaC_barrierf(L, obj2gco(p), gcvalue(v)); \
}
#define luaC_barriert(L, t, v) \
{ \
if (iscollectable(v) && isblack(obj2gco(t)) && iswhite(gcvalue(v))) \
luaC_barriertable(L, t, gcvalue(v)); \
}
#define luaC_barrierfast(L, t) \
{ \
if (isblack(obj2gco(t))) \
luaC_barrierback(L, obj2gco(t), &t->gclist); \
}
#define luaC_objbarrier(L, p, o) \
{ \
if (isblack(obj2gco(p)) && iswhite(obj2gco(o))) \
luaC_barrierf(L, obj2gco(p), obj2gco(o)); \
}
#define luaC_threadbarrier(L) \
{ \
if (isblack(obj2gco(L))) \
luaC_barrierback(L, obj2gco(L), &L->gclist); \
}
#define luaC_init(L, o, tt_) \
{ \
o->marked = luaC_white(L->global); \
o->tt = tt_; \
o->memcat = L->activememcat; \
}
LUAI_FUNC void luaC_freeall(lua_State* L);
LUAI_FUNC size_t luaC_step(lua_State* L, bool assist);
LUAI_FUNC void luaC_fullgc(lua_State* L);
LUAI_FUNC void luaC_initobj(lua_State* L, GCObject* o, uint8_t tt);
LUAI_FUNC void luaC_upvalclosed(lua_State* L, UpVal* uv);
LUAI_FUNC void luaC_barrierf(lua_State* L, GCObject* o, GCObject* v);
LUAI_FUNC void luaC_barriertable(lua_State* L, Table* t, GCObject* v);
LUAI_FUNC void luaC_barrierback(lua_State* L, GCObject* o, GCObject** gclist);
LUAI_FUNC void luaC_validate(lua_State* L);
LUAI_FUNC void luaC_dump(lua_State* L, void* file, const char* (*categoryName)(lua_State* L, uint8_t memcat));
LUAI_FUNC int64_t luaC_allocationrate(lua_State* L);
LUAI_FUNC const char* luaC_statename(int state);
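
A small illustration (editor's sketch, not part of the sources) of the marked-field encoding defined above: a white object carries one of the two white bits, a black object carries the black bit, and gray is represented by the absence of all three.
static bool sketch_colors()
{
    uint8_t m = bitmask(WHITE0BIT);                             // new object: white (type 0)
    bool wasWhite = testbits(m, WHITEBITS) != 0;                // iswhite would be true
    resetbits(m, WHITEBITS);                                    // mark phase: no longer white...
    bool wasGray = !testbits(m, WHITEBITS | bitmask(BLACKBIT)); // ...not yet black: gray
    l_setbit(m, BLACKBIT);                                      // children traversed: black
    return wasWhite && wasGray && testbit(m, BLACKBIT);         // all hold
}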

View File

@ -1,604 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lgc.h"
#include "lfunc.h"
#include "lmem.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ludata.h"
#include <string.h>
#include <stdio.h>
static void validateobjref(global_State* g, GCObject* f, GCObject* t)
{
LUAU_ASSERT(!isdead(g, t));
if (keepinvariant(g))
{
// basic incremental invariant: black can't point to white
LUAU_ASSERT(!(isblack(f) && iswhite(t)));
}
}
static void validateref(global_State* g, GCObject* f, TValue* v)
{
if (iscollectable(v))
{
LUAU_ASSERT(ttype(v) == gcvalue(v)->gch.tt);
validateobjref(g, f, gcvalue(v));
}
}
static void validatetable(global_State* g, Table* h)
{
int sizenode = 1 << h->lsizenode;
LUAU_ASSERT(h->lastfree <= sizenode);
if (h->metatable)
validateobjref(g, obj2gco(h), obj2gco(h->metatable));
for (int i = 0; i < h->sizearray; ++i)
validateref(g, obj2gco(h), &h->array[i]);
for (int i = 0; i < sizenode; ++i)
{
LuaNode* n = &h->node[i];
LUAU_ASSERT(ttype(gkey(n)) != LUA_TDEADKEY || ttisnil(gval(n)));
LUAU_ASSERT(i + gnext(n) >= 0 && i + gnext(n) < sizenode);
if (!ttisnil(gval(n)))
{
TValue k = {};
k.tt = gkey(n)->tt;
k.value = gkey(n)->value;
validateref(g, obj2gco(h), &k);
validateref(g, obj2gco(h), gval(n));
}
}
}
static void validateclosure(global_State* g, Closure* cl)
{
validateobjref(g, obj2gco(cl), obj2gco(cl->env));
if (cl->isC)
{
for (int i = 0; i < cl->nupvalues; ++i)
validateref(g, obj2gco(cl), &cl->c.upvals[i]);
}
else
{
LUAU_ASSERT(cl->nupvalues == cl->l.p->nups);
validateobjref(g, obj2gco(cl), obj2gco(cl->l.p));
for (int i = 0; i < cl->nupvalues; ++i)
validateref(g, obj2gco(cl), &cl->l.uprefs[i]);
}
}
static void validatestack(global_State* g, lua_State* l)
{
validateobjref(g, obj2gco(l), obj2gco(l->gt));
for (CallInfo* ci = l->base_ci; ci <= l->ci; ++ci)
{
LUAU_ASSERT(l->stack <= ci->base);
LUAU_ASSERT(ci->func <= ci->base && ci->base <= ci->top);
LUAU_ASSERT(ci->top <= l->stack_last);
}
// note: stack refs can violate gc invariant so we only check for liveness
for (StkId o = l->stack; o < l->top; ++o)
checkliveness(g, o);
if (l->namecall)
validateobjref(g, obj2gco(l), obj2gco(l->namecall));
for (UpVal* uv = l->openupval; uv; uv = uv->u.open.threadnext)
{
LUAU_ASSERT(uv->tt == LUA_TUPVAL);
LUAU_ASSERT(upisopen(uv));
LUAU_ASSERT(uv->u.open.next->u.open.prev == uv && uv->u.open.prev->u.open.next == uv);
LUAU_ASSERT(!isblack(obj2gco(uv))); // open upvalues are never black
}
}
static void validateproto(global_State* g, Proto* f)
{
if (f->source)
validateobjref(g, obj2gco(f), obj2gco(f->source));
if (f->debugname)
validateobjref(g, obj2gco(f), obj2gco(f->debugname));
for (int i = 0; i < f->sizek; ++i)
validateref(g, obj2gco(f), &f->k[i]);
for (int i = 0; i < f->sizeupvalues; ++i)
if (f->upvalues[i])
validateobjref(g, obj2gco(f), obj2gco(f->upvalues[i]));
for (int i = 0; i < f->sizep; ++i)
if (f->p[i])
validateobjref(g, obj2gco(f), obj2gco(f->p[i]));
for (int i = 0; i < f->sizelocvars; i++)
if (f->locvars[i].varname)
validateobjref(g, obj2gco(f), obj2gco(f->locvars[i].varname));
}
static void validateobj(global_State* g, GCObject* o)
{
// dead objects can only occur during sweep
if (isdead(g, o))
{
LUAU_ASSERT(g->gcstate == GCSsweep);
return;
}
switch (o->gch.tt)
{
case LUA_TSTRING:
break;
case LUA_TTABLE:
validatetable(g, gco2h(o));
break;
case LUA_TFUNCTION:
validateclosure(g, gco2cl(o));
break;
case LUA_TUSERDATA:
if (gco2u(o)->metatable)
validateobjref(g, o, obj2gco(gco2u(o)->metatable));
break;
case LUA_TTHREAD:
validatestack(g, gco2th(o));
break;
case LUA_TPROTO:
validateproto(g, gco2p(o));
break;
case LUA_TUPVAL:
validateref(g, o, gco2uv(o)->v);
break;
default:
LUAU_ASSERT(!"unexpected object type");
}
}
static void validategraylist(global_State* g, GCObject* o)
{
if (!keepinvariant(g))
return;
while (o)
{
LUAU_ASSERT(isgray(o));
switch (o->gch.tt)
{
case LUA_TTABLE:
o = gco2h(o)->gclist;
break;
case LUA_TFUNCTION:
o = gco2cl(o)->gclist;
break;
case LUA_TTHREAD:
o = gco2th(o)->gclist;
break;
case LUA_TPROTO:
o = gco2p(o)->gclist;
break;
default:
LUAU_ASSERT(!"unknown object in gray list");
return;
}
}
}
static bool validategco(void* context, lua_Page* page, GCObject* gco)
{
lua_State* L = (lua_State*)context;
global_State* g = L->global;
validateobj(g, gco);
return false;
}
void luaC_validate(lua_State* L)
{
global_State* g = L->global;
LUAU_ASSERT(!isdead(g, obj2gco(g->mainthread)));
checkliveness(g, &g->registry);
for (int i = 0; i < LUA_T_COUNT; ++i)
if (g->mt[i])
LUAU_ASSERT(!isdead(g, obj2gco(g->mt[i])));
validategraylist(g, g->weak);
validategraylist(g, g->gray);
validategraylist(g, g->grayagain);
validategco(L, NULL, obj2gco(g->mainthread));
luaM_visitgco(L, L, validategco);
for (UpVal* uv = g->uvhead.u.open.next; uv != &g->uvhead; uv = uv->u.open.next)
{
LUAU_ASSERT(uv->tt == LUA_TUPVAL);
LUAU_ASSERT(upisopen(uv));
LUAU_ASSERT(uv->u.open.next->u.open.prev == uv && uv->u.open.prev->u.open.next == uv);
LUAU_ASSERT(!isblack(obj2gco(uv))); // open upvalues are never black
}
}
inline bool safejson(char ch)
{
return unsigned(ch) < 128 && ch >= 32 && ch != '\\' && ch != '\"';
}
static void dumpref(FILE* f, GCObject* o)
{
fprintf(f, "\"%p\"", o);
}
static void dumprefs(FILE* f, TValue* data, size_t size)
{
bool first = true;
for (size_t i = 0; i < size; ++i)
{
if (iscollectable(&data[i]))
{
if (!first)
fputc(',', f);
first = false;
dumpref(f, gcvalue(&data[i]));
}
}
}
static void dumpstringdata(FILE* f, const char* data, size_t len)
{
for (size_t i = 0; i < len; ++i)
fputc(safejson(data[i]) ? data[i] : '?', f);
}
static void dumpstring(FILE* f, TString* ts)
{
fprintf(f, "{\"type\":\"string\",\"cat\":%d,\"size\":%d,\"data\":\"", ts->memcat, int(sizestring(ts->len)));
dumpstringdata(f, ts->data, ts->len);
fprintf(f, "\"}");
}
static void dumptable(FILE* f, Table* h)
{
size_t size = sizeof(Table) + (h->node == &luaH_dummynode ? 0 : sizenode(h) * sizeof(LuaNode)) + h->sizearray * sizeof(TValue);
fprintf(f, "{\"type\":\"table\",\"cat\":%d,\"size\":%d", h->memcat, int(size));
if (h->node != &luaH_dummynode)
{
fprintf(f, ",\"pairs\":[");
bool first = true;
for (int i = 0; i < sizenode(h); ++i)
{
const LuaNode& n = h->node[i];
if (!ttisnil(&n.val) && (iscollectable(&n.key) || iscollectable(&n.val)))
{
if (!first)
fputc(',', f);
first = false;
if (iscollectable(&n.key))
dumpref(f, gcvalue(&n.key));
else
fprintf(f, "null");
fputc(',', f);
if (iscollectable(&n.val))
dumpref(f, gcvalue(&n.val));
else
fprintf(f, "null");
}
}
fprintf(f, "]");
}
if (h->sizearray)
{
fprintf(f, ",\"array\":[");
dumprefs(f, h->array, h->sizearray);
fprintf(f, "]");
}
if (h->metatable)
{
fprintf(f, ",\"metatable\":");
dumpref(f, obj2gco(h->metatable));
}
fprintf(f, "}");
}
static void dumpclosure(FILE* f, Closure* cl)
{
fprintf(f, "{\"type\":\"function\",\"cat\":%d,\"size\":%d", cl->memcat,
cl->isC ? int(sizeCclosure(cl->nupvalues)) : int(sizeLclosure(cl->nupvalues)));
fprintf(f, ",\"env\":");
dumpref(f, obj2gco(cl->env));
if (cl->isC)
{
if (cl->c.debugname)
fprintf(f, ",\"name\":\"%s\"", cl->c.debugname + 0);
if (cl->nupvalues)
{
fprintf(f, ",\"upvalues\":[");
dumprefs(f, cl->c.upvals, cl->nupvalues);
fprintf(f, "]");
}
}
else
{
if (cl->l.p->debugname)
fprintf(f, ",\"name\":\"%s\"", getstr(cl->l.p->debugname));
fprintf(f, ",\"proto\":");
dumpref(f, obj2gco(cl->l.p));
if (cl->nupvalues)
{
fprintf(f, ",\"upvalues\":[");
dumprefs(f, cl->l.uprefs, cl->nupvalues);
fprintf(f, "]");
}
}
fprintf(f, "}");
}
static void dumpudata(FILE* f, Udata* u)
{
fprintf(f, "{\"type\":\"userdata\",\"cat\":%d,\"size\":%d,\"tag\":%d", u->memcat, int(sizeudata(u->len)), u->tag);
if (u->metatable)
{
fprintf(f, ",\"metatable\":");
dumpref(f, obj2gco(u->metatable));
}
fprintf(f, "}");
}
static void dumpthread(FILE* f, lua_State* th)
{
size_t size = sizeof(lua_State) + sizeof(TValue) * th->stacksize + sizeof(CallInfo) * th->size_ci;
fprintf(f, "{\"type\":\"thread\",\"cat\":%d,\"size\":%d", th->memcat, int(size));
fprintf(f, ",\"env\":");
dumpref(f, obj2gco(th->gt));
Closure* tcl = 0;
for (CallInfo* ci = th->base_ci; ci <= th->ci; ++ci)
{
if (ttisfunction(ci->func))
{
tcl = clvalue(ci->func);
break;
}
}
if (tcl && !tcl->isC && tcl->l.p->source)
{
Proto* p = tcl->l.p;
fprintf(f, ",\"source\":\"");
dumpstringdata(f, p->source->data, p->source->len);
fprintf(f, "\",\"line\":%d", p->linedefined);
}
if (th->top > th->stack)
{
fprintf(f, ",\"stack\":[");
dumprefs(f, th->stack, th->top - th->stack);
fprintf(f, "]");
CallInfo* ci = th->base_ci;
bool first = true;
fprintf(f, ",\"stacknames\":[");
for (StkId v = th->stack; v < th->top; ++v)
{
if (!iscollectable(v))
continue;
while (ci < th->ci && v >= (ci + 1)->func)
ci++;
if (!first)
fputc(',', f);
first = false;
if (v == ci->func)
{
Closure* cl = ci_func(ci);
if (cl->isC)
{
fprintf(f, "\"frame:%s\"", cl->c.debugname ? cl->c.debugname : "[C]");
}
else
{
Proto* p = cl->l.p;
fprintf(f, "\"frame:");
if (p->source)
dumpstringdata(f, p->source->data, p->source->len);
fprintf(f, ":%d:%s\"", p->linedefined, p->debugname ? getstr(p->debugname) : "");
}
}
else if (isLua(ci))
{
Proto* p = ci_func(ci)->l.p;
int pc = pcRel(ci->savedpc, p);
const LocVar* var = luaF_findlocal(p, int(v - ci->base), pc);
if (var && var->varname)
fprintf(f, "\"%s\"", getstr(var->varname));
else
fprintf(f, "null");
}
else
fprintf(f, "null");
}
fprintf(f, "]");
}
fprintf(f, "}");
}
static void dumpproto(FILE* f, Proto* p)
{
size_t size = sizeof(Proto) + sizeof(Instruction) * p->sizecode + sizeof(Proto*) * p->sizep + sizeof(TValue) * p->sizek + p->sizelineinfo +
sizeof(LocVar) * p->sizelocvars + sizeof(TString*) * p->sizeupvalues;
fprintf(f, "{\"type\":\"proto\",\"cat\":%d,\"size\":%d", p->memcat, int(size));
if (p->source)
{
fprintf(f, ",\"source\":\"");
dumpstringdata(f, p->source->data, p->source->len);
fprintf(f, "\",\"line\":%d", p->abslineinfo ? p->abslineinfo[0] : 0);
}
if (p->sizek)
{
fprintf(f, ",\"constants\":[");
dumprefs(f, p->k, p->sizek);
fprintf(f, "]");
}
if (p->sizep)
{
fprintf(f, ",\"protos\":[");
for (int i = 0; i < p->sizep; ++i)
{
if (i != 0)
fputc(',', f);
dumpref(f, obj2gco(p->p[i]));
}
fprintf(f, "]");
}
fprintf(f, "}");
}
static void dumpupval(FILE* f, UpVal* uv)
{
fprintf(f, "{\"type\":\"upvalue\",\"cat\":%d,\"size\":%d,\"open\":%s", uv->memcat, int(sizeof(UpVal)), upisopen(uv) ? "true" : "false");
if (iscollectable(uv->v))
{
fprintf(f, ",\"object\":");
dumpref(f, gcvalue(uv->v));
}
fprintf(f, "}");
}
static void dumpobj(FILE* f, GCObject* o)
{
switch (o->gch.tt)
{
case LUA_TSTRING:
return dumpstring(f, gco2ts(o));
case LUA_TTABLE:
return dumptable(f, gco2h(o));
case LUA_TFUNCTION:
return dumpclosure(f, gco2cl(o));
case LUA_TUSERDATA:
return dumpudata(f, gco2u(o));
case LUA_TTHREAD:
return dumpthread(f, gco2th(o));
case LUA_TPROTO:
return dumpproto(f, gco2p(o));
case LUA_TUPVAL:
return dumpupval(f, gco2uv(o));
default:
LUAU_ASSERT(0);
}
}
static bool dumpgco(void* context, lua_Page* page, GCObject* gco)
{
FILE* f = (FILE*)context;
dumpref(f, gco);
fputc(':', f);
dumpobj(f, gco);
fputc(',', f);
fputc('\n', f);
return false;
}
void luaC_dump(lua_State* L, void* file, const char* (*categoryName)(lua_State* L, uint8_t memcat))
{
global_State* g = L->global;
FILE* f = static_cast<FILE*>(file);
fprintf(f, "{\"objects\":{\n");
dumpgco(f, NULL, obj2gco(g->mainthread));
luaM_visitgco(L, f, dumpgco);
fprintf(f, "\"0\":{\"type\":\"userdata\",\"cat\":0,\"size\":0}\n"); // to avoid issues with trailing ,
fprintf(f, "},\"roots\":{\n");
fprintf(f, "\"mainthread\":");
dumpref(f, obj2gco(g->mainthread));
fprintf(f, ",\"registry\":");
dumpref(f, gcvalue(&g->registry));
fprintf(f, "},\"stats\":{\n");
fprintf(f, "\"size\":%d,\n", int(g->totalbytes));
fprintf(f, "\"categories\":{\n");
for (int i = 0; i < LUA_MEMORY_CATEGORIES; i++)
{
if (size_t bytes = g->memcatbytes[i])
{
if (categoryName)
fprintf(f, "\"%d\":{\"name\":\"%s\", \"size\":%d},\n", i, categoryName(L, i), int(bytes));
else
fprintf(f, "\"%d\":{\"size\":%d},\n", i, int(bytes));
}
}
fprintf(f, "\"none\":{}\n"); // to avoid issues with trailing ,
fprintf(f, "}\n");
fprintf(f, "}}\n");
}
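
A hedged usage sketch for the dumper above: the void* argument must actually be a FILE*, the category-name callback may be NULL, and the output is a single JSON document covering every live object plus roots and per-category statistics.
FILE* f = fopen("heap-snapshot.json", "w"); // hypothetical output path
if (f)
{
    luaC_dump(L, f, NULL); // writes {"objects":{...},"roots":{...},"stats":{...}}
    fclose(f);
}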

View File

@ -1,87 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include <stdlib.h>
static const luaL_Reg lualibs[] = {
{"", luaopen_base},
{LUA_COLIBNAME, luaopen_coroutine},
{LUA_TABLIBNAME, luaopen_table},
{LUA_OSLIBNAME, luaopen_os},
{LUA_STRLIBNAME, luaopen_string},
{LUA_MATHLIBNAME, luaopen_math},
{LUA_DBLIBNAME, luaopen_debug},
{LUA_UTF8LIBNAME, luaopen_utf8},
{LUA_BITLIBNAME, luaopen_bit32},
{NULL, NULL},
};
void luaL_openlibs(lua_State* L)
{
const luaL_Reg* lib = lualibs;
for (; lib->func; lib++)
{
lua_pushcfunction(L, lib->func, NULL);
lua_pushstring(L, lib->name);
lua_call(L, 1, 0);
}
}
void luaL_sandbox(lua_State* L)
{
// set all libraries to read-only
lua_pushnil(L);
while (lua_next(L, LUA_GLOBALSINDEX) != 0)
{
if (lua_istable(L, -1))
lua_setreadonly(L, -1, true);
lua_pop(L, 1);
}
// set all builtin metatables to read-only
lua_pushliteral(L, "");
lua_getmetatable(L, -1);
lua_setreadonly(L, -1, true);
lua_pop(L, 2);
// set globals to readonly and activate safeenv since the env is immutable
lua_setreadonly(L, LUA_GLOBALSINDEX, true);
lua_setsafeenv(L, LUA_GLOBALSINDEX, true);
}
void luaL_sandboxthread(lua_State* L)
{
// create new global table that proxies reads to original table
lua_newtable(L);
lua_newtable(L);
lua_pushvalue(L, LUA_GLOBALSINDEX);
lua_setfield(L, -2, "__index");
lua_setreadonly(L, -1, true);
lua_setmetatable(L, -2);
// we can set safeenv now although it's important to set it to false if code is loaded twice into the thread
lua_replace(L, LUA_GLOBALSINDEX);
lua_setsafeenv(L, LUA_GLOBALSINDEX, true);
}
static void* l_alloc(void* ud, void* ptr, size_t osize, size_t nsize)
{
(void)ud;
(void)osize;
if (nsize == 0)
{
free(ptr);
return NULL;
}
else
return realloc(ptr, nsize);
}
lua_State* luaL_newstate(void)
{
return lua_newstate(l_alloc, NULL);
}
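
A minimal host-setup sketch using the helpers defined above (editor's illustration; error handling omitted): create a state, open the standard libraries, freeze the global environment, then hand each script a sandboxed thread of its own.
lua_State* L = luaL_newstate();
luaL_openlibs(L);
luaL_sandbox(L);                 // globals and builtin metatables become read-only
lua_State* T = lua_newthread(L); // one thread per script
luaL_sandboxthread(T);           // T gets a writable proxy of the global table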

View File

@ -1,446 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lstate.h"
#include <math.h>
#include <time.h>
#undef PI
#define PI (3.14159265358979323846)
#define RADIANS_PER_DEGREE (PI / 180.0)
#define PCG32_INC 105
static uint32_t pcg32_random(uint64_t* state)
{
uint64_t oldstate = *state;
*state = oldstate * 6364136223846793005ULL + (PCG32_INC | 1);
uint32_t xorshifted = uint32_t(((oldstate >> 18u) ^ oldstate) >> 27u);
uint32_t rot = uint32_t(oldstate >> 59u);
return (xorshifted >> rot) | (xorshifted << ((-int32_t(rot)) & 31));
}
static void pcg32_seed(uint64_t* state, uint64_t seed)
{
*state = 0;
pcg32_random(state);
*state += seed;
pcg32_random(state);
}
static int math_abs(lua_State* L)
{
lua_pushnumber(L, fabs(luaL_checknumber(L, 1)));
return 1;
}
static int math_sin(lua_State* L)
{
lua_pushnumber(L, sin(luaL_checknumber(L, 1)));
return 1;
}
static int math_sinh(lua_State* L)
{
lua_pushnumber(L, sinh(luaL_checknumber(L, 1)));
return 1;
}
static int math_cos(lua_State* L)
{
lua_pushnumber(L, cos(luaL_checknumber(L, 1)));
return 1;
}
static int math_cosh(lua_State* L)
{
lua_pushnumber(L, cosh(luaL_checknumber(L, 1)));
return 1;
}
static int math_tan(lua_State* L)
{
lua_pushnumber(L, tan(luaL_checknumber(L, 1)));
return 1;
}
static int math_tanh(lua_State* L)
{
lua_pushnumber(L, tanh(luaL_checknumber(L, 1)));
return 1;
}
static int math_asin(lua_State* L)
{
lua_pushnumber(L, asin(luaL_checknumber(L, 1)));
return 1;
}
static int math_acos(lua_State* L)
{
lua_pushnumber(L, acos(luaL_checknumber(L, 1)));
return 1;
}
static int math_atan(lua_State* L)
{
lua_pushnumber(L, atan(luaL_checknumber(L, 1)));
return 1;
}
static int math_atan2(lua_State* L)
{
lua_pushnumber(L, atan2(luaL_checknumber(L, 1), luaL_checknumber(L, 2)));
return 1;
}
static int math_ceil(lua_State* L)
{
lua_pushnumber(L, ceil(luaL_checknumber(L, 1)));
return 1;
}
static int math_floor(lua_State* L)
{
lua_pushnumber(L, floor(luaL_checknumber(L, 1)));
return 1;
}
static int math_fmod(lua_State* L)
{
lua_pushnumber(L, fmod(luaL_checknumber(L, 1), luaL_checknumber(L, 2)));
return 1;
}
static int math_modf(lua_State* L)
{
double ip;
double fp = modf(luaL_checknumber(L, 1), &ip);
lua_pushnumber(L, ip);
lua_pushnumber(L, fp);
return 2;
}
static int math_sqrt(lua_State* L)
{
lua_pushnumber(L, sqrt(luaL_checknumber(L, 1)));
return 1;
}
static int math_pow(lua_State* L)
{
lua_pushnumber(L, pow(luaL_checknumber(L, 1), luaL_checknumber(L, 2)));
return 1;
}
static int math_log(lua_State* L)
{
double x = luaL_checknumber(L, 1);
double res;
if (lua_isnoneornil(L, 2))
res = log(x);
else
{
double base = luaL_checknumber(L, 2);
if (base == 2.0)
res = log2(x);
else if (base == 10.0)
res = log10(x);
else
res = log(x) / log(base);
}
lua_pushnumber(L, res);
return 1;
}
static int math_log10(lua_State* L)
{
lua_pushnumber(L, log10(luaL_checknumber(L, 1)));
return 1;
}
static int math_exp(lua_State* L)
{
lua_pushnumber(L, exp(luaL_checknumber(L, 1)));
return 1;
}
static int math_deg(lua_State* L)
{
lua_pushnumber(L, luaL_checknumber(L, 1) / RADIANS_PER_DEGREE);
return 1;
}
static int math_rad(lua_State* L)
{
lua_pushnumber(L, luaL_checknumber(L, 1) * RADIANS_PER_DEGREE);
return 1;
}
static int math_frexp(lua_State* L)
{
int e;
lua_pushnumber(L, frexp(luaL_checknumber(L, 1), &e));
lua_pushinteger(L, e);
return 2;
}
static int math_ldexp(lua_State* L)
{
lua_pushnumber(L, ldexp(luaL_checknumber(L, 1), luaL_checkinteger(L, 2)));
return 1;
}
static int math_min(lua_State* L)
{
int n = lua_gettop(L); // number of arguments
double dmin = luaL_checknumber(L, 1);
int i;
for (i = 2; i <= n; i++)
{
double d = luaL_checknumber(L, i);
if (d < dmin)
dmin = d;
}
lua_pushnumber(L, dmin);
return 1;
}
static int math_max(lua_State* L)
{
int n = lua_gettop(L); // number of arguments
double dmax = luaL_checknumber(L, 1);
int i;
for (i = 2; i <= n; i++)
{
double d = luaL_checknumber(L, i);
if (d > dmax)
dmax = d;
}
lua_pushnumber(L, dmax);
return 1;
}
static int math_random(lua_State* L)
{
global_State* g = L->global;
switch (lua_gettop(L))
{ // check number of arguments
case 0:
{ // no arguments
// Using ldexp instead of division for speed & clarity.
// See http://mumble.net/~campbell/tmp/random_real.c for details on generating doubles from integer ranges.
uint32_t rl = pcg32_random(&g->rngstate);
uint32_t rh = pcg32_random(&g->rngstate);
double rd = ldexp(double(rl | (uint64_t(rh) << 32)), -64);
lua_pushnumber(L, rd); // number between 0 and 1
break;
}
case 1:
{ // only upper limit
int u = luaL_checkinteger(L, 1);
luaL_argcheck(L, 1 <= u, 1, "interval is empty");
uint64_t x = uint64_t(u) * pcg32_random(&g->rngstate);
int r = int(1 + (x >> 32));
lua_pushinteger(L, r); // int between 1 and `u'
break;
}
case 2:
{ // lower and upper limits
int l = luaL_checkinteger(L, 1);
int u = luaL_checkinteger(L, 2);
luaL_argcheck(L, l <= u, 2, "interval is empty");
uint32_t ul = uint32_t(u) - uint32_t(l);
luaL_argcheck(L, ul < UINT_MAX, 2, "interval is too large"); // -INT_MIN..INT_MAX interval can result in integer overflow
uint64_t x = uint64_t(ul + 1) * pcg32_random(&g->rngstate);
int r = int(l + (x >> 32));
lua_pushinteger(L, r); // int between `l' and `u'
break;
}
default:
luaL_error(L, "wrong number of arguments");
}
return 1;
}
static int math_randomseed(lua_State* L)
{
int seed = luaL_checkinteger(L, 1);
pcg32_seed(&L->global->rngstate, seed);
return 0;
}
static const unsigned char kPerlin[512] = {151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99,
37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41,
55, 46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86,
164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17,
182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110,
79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14,
239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24,
72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180,
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247,
120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65,
25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52,
217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213,
119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185, 112,
104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31, 181, 199,
106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61,
156, 180};
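// The table and helpers below implement Ken Perlin's improved noise (2002):
// fade is the quintic 6t^5 - 15t^4 + 10t^3 interpolant (zero first and second
// derivatives at t = 0 and t = 1), grad maps the low hash bits to one of 12
// gradient directions, and perlin() trilinearly blends the gradient dot
// products at the 8 corners of the surrounding lattice cell.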
static float fade(float t)
{
return t * t * t * (t * (t * 6 - 15) + 10);
}
static float math_lerp(float t, float a, float b)
{
return a + t * (b - a);
}
static float grad(unsigned char hash, float x, float y, float z)
{
unsigned char h = hash & 15;
float u = (h < 8) ? x : y;
float v = (h < 4) ? y : (h == 12 || h == 14) ? x : z;
return (h & 1 ? -u : u) + (h & 2 ? -v : v);
}
static float perlin(float x, float y, float z)
{
float xflr = floorf(x);
float yflr = floorf(y);
float zflr = floorf(z);
int xi = int(xflr) & 255;
int yi = int(yflr) & 255;
int zi = int(zflr) & 255;
float xf = x - xflr;
float yf = y - yflr;
float zf = z - zflr;
float u = fade(xf);
float v = fade(yf);
float w = fade(zf);
const unsigned char* p = kPerlin;
int a = p[xi] + yi;
int aa = p[a] + zi;
int ab = p[a + 1] + zi;
int b = p[xi + 1] + yi;
int ba = p[b] + zi;
int bb = p[b + 1] + zi;
return math_lerp(w,
math_lerp(v, math_lerp(u, grad(p[aa], xf, yf, zf), grad(p[ba], xf - 1, yf, zf)),
math_lerp(u, grad(p[ab], xf, yf - 1, zf), grad(p[bb], xf - 1, yf - 1, zf))),
math_lerp(v, math_lerp(u, grad(p[aa + 1], xf, yf, zf - 1), grad(p[ba + 1], xf - 1, yf, zf - 1)),
math_lerp(u, grad(p[ab + 1], xf, yf - 1, zf - 1), grad(p[bb + 1], xf - 1, yf - 1, zf - 1))));
}
static int math_noise(lua_State* L)
{
double x = luaL_checknumber(L, 1);
double y = luaL_optnumber(L, 2, 0.0);
double z = luaL_optnumber(L, 3, 0.0);
double r = perlin((float)x, (float)y, (float)z);
lua_pushnumber(L, r);
return 1;
}
static int math_clamp(lua_State* L)
{
double v = luaL_checknumber(L, 1);
double min = luaL_checknumber(L, 2);
double max = luaL_checknumber(L, 3);
luaL_argcheck(L, min <= max, 3, "max must be greater than or equal to min");
double r = v < min ? min : v;
r = r > max ? max : r;
lua_pushnumber(L, r);
return 1;
}
static int math_sign(lua_State* L)
{
double v = luaL_checknumber(L, 1);
lua_pushnumber(L, v > 0.0 ? 1.0 : v < 0.0 ? -1.0 : 0.0);
return 1;
}
static int math_round(lua_State* L)
{
lua_pushnumber(L, round(luaL_checknumber(L, 1)));
return 1;
}
static const luaL_Reg mathlib[] = {
{"abs", math_abs},
{"acos", math_acos},
{"asin", math_asin},
{"atan2", math_atan2},
{"atan", math_atan},
{"ceil", math_ceil},
{"cosh", math_cosh},
{"cos", math_cos},
{"deg", math_deg},
{"exp", math_exp},
{"floor", math_floor},
{"fmod", math_fmod},
{"frexp", math_frexp},
{"ldexp", math_ldexp},
{"log10", math_log10},
{"log", math_log},
{"max", math_max},
{"min", math_min},
{"modf", math_modf},
{"pow", math_pow},
{"rad", math_rad},
{"random", math_random},
{"randomseed", math_randomseed},
{"sinh", math_sinh},
{"sin", math_sin},
{"sqrt", math_sqrt},
{"tanh", math_tanh},
{"tan", math_tan},
{"noise", math_noise},
{"clamp", math_clamp},
{"sign", math_sign},
{"round", math_round},
{NULL, NULL},
};
/*
** Open math library
*/
int luaopen_math(lua_State* L)
{
uint64_t seed = uintptr_t(L);
seed ^= time(NULL);
seed ^= clock();
pcg32_seed(&L->global->rngstate, seed);
luaL_register(L, LUA_MATHLIBNAME, mathlib);
lua_pushnumber(L, PI);
lua_setfield(L, -2, "pi");
lua_pushnumber(L, HUGE_VAL);
lua_setfield(L, -2, "huge");
return 1;
}
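
A sketch (editor's note, not from the sources) of the two techniques math.random combines above: PCG32 supplies the raw 32-bit values, and a 64-bit multiply-high maps them onto a range, avoiding the bias a plain `r % n` would introduce.
uint64_t state;
pcg32_seed(&state, 12345);         // hypothetical fixed seed
uint32_t r = pcg32_random(&state); // uniform in [0, 2^32)
int u = 6;                         // e.g. a die roll
uint64_t x = uint64_t(u) * r;      // spans [0, u * 2^32)
int roll = int(1 + (x >> 32));     // high word lands near-uniformly in [0, u), so roll is in [1, u]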

View File

@ -1,646 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lmem.h"
#include "lstate.h"
#include "ldo.h"
#include "ldebug.h"
#include <string.h>
/*
* Luau heap uses a size-segregated page structure, with individual pages and large allocations
* allocated using system heap (via frealloc callback).
*
* frealloc callback serves as a general, if slow, allocation callback that can allocate, free or
* resize allocations:
*
* void* frealloc(void* ud, void* ptr, size_t oldsize, size_t newsize);
*
* frealloc(ud, NULL, 0, x) creates a new block of size x
* frealloc(ud, p, x, 0) frees the block p (must return NULL)
* frealloc(ud, NULL, 0, 0) does nothing, equivalent to free(NULL)
*
* frealloc returns NULL if it cannot create or reallocate the area
* (any reallocation to an equal or smaller size cannot fail!)
*
* On top of this, Luau implements heap storage which is split into two types of allocations:
*
* - GCO, short for "garbage collected objects"
* - other objects (for example, arrays stored inside table objects)
*
* The heap layout for these two allocation types is a bit different.
*
* All GCOs are allocated in pages; a page is a block of memory ~16K in size that starts with a page header
* (lua_Page). Each page contains 1..N blocks of the same size, where N is selected to fill the page
* completely. This amortizes the allocation cost and increases locality. Each GCO block starts with
* the GC header (GCheader) which contains the object type, mark bits and other GC metadata. If the
* GCO block is free (not used), then it must have the type set to TNIL; in this case the block can
* be part of the per-page free list; the link for that list is stored after the header (freegcolink).
*
* Importantly, the GCO block doesn't have any back references to the page it's allocated in, so it's
* impossible to free it in isolation - GCO blocks are freed by sweeping the pages they belong to,
* using luaM_freegco which must specify the page; this is called by page sweeper that traverses the
* entire page's worth of objects. For this reason it's also important that freed GCO blocks keep the
* GC header intact and accessible (with type = NIL) so that the sweeper can access it.
*
* Some GCOs are too large to fit in a 16K page without excessive fragmentation (the size threshold is
* currently 512 bytes); in this case, we allocate a dedicated small page with just a single block's worth
* of storage space, but that requires allocating an extra page header. In effect large GCOs are a little bit
* less memory efficient, but this allows us to uniformly sweep small and large GCOs using page lists.
*
* All GCO pages are linked in a large intrusive linked list (global_State::allgcopages). Additionally,
* for each block size there's a page free list that contains pages that have at least one free block
* (global_State::freegcopages). This free list is used to make sure object allocation is O(1).
*
* Compared to GCOs, regular allocations have two important differences: they can be freed in isolation,
* and they don't start with a GC header. Because of this, each allocation is prefixed with block metadata,
* which contains the pointer to the page for allocated blocks, and the pointer to the next free block
* inside the page for freed blocks.
* For regular allocations that are too large to fit in a page (using the same threshold of 512 bytes),
* we don't allocate a separate page, instead simply using frealloc to allocate a vanilla block of memory.
*
* Just like GCO pages, we store a page free list (global_State::freepages) that allows O(1) allocation;
* there is no global list for non-GCO pages since we never need to traverse them directly.
*
* In both cases, we pick the page by computing the size class from the block size which rounds the block
* size up to reduce the chance that we'll allocate pages that have very few allocated blocks. The size
* class strategy is determined by SizeClassConfig constructor.
*
* Note that when the last block in a page is freed, we immediately free the page with frealloc - the
* memory manager doesn't currently attempt to keep unused memory around. This can result in excessive
* allocation traffic and can be mitigated by adding a page cache in the future.
*
* For both GCO and non-GCO pages, the per-page block allocation combines bump pointer style allocation
* (lua_Page::freeNext) and per-page free list (lua_Page::freeList). We use the bump allocator to allocate
* the contents of the page, and the free list for further reuse; this allows shorter page setup times
* which results in less variance in allocation cost, as well as tighter sweep bounds for newly
* allocated pages.
*/
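// Editor's summary of the two allocation paths above (no new behavior): a small
// non-GCO allocation, e.g. a table's array part, goes luaM_new_ -> sizeclass ->
// newblock and is prefixed with a page back-pointer so it can be freed in
// isolation; a small GCO goes luaM_newgco_ -> newgcoblock with no per-block
// prefix and is freed only by the sweeper, which passes the owning page to
// luaM_freegco.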
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#if __has_feature(address_sanitizer) || defined(LUAU_ENABLE_ASAN)
#include <sanitizer/asan_interface.h>
#define ASAN_POISON_MEMORY_REGION(addr, size) __asan_poison_memory_region((addr), (size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) __asan_unpoison_memory_region((addr), (size))
#else
#define ASAN_POISON_MEMORY_REGION(addr, size) (void)0
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) (void)0
#endif
/*
* The sizes of Luau objects aren't crucial for code correctness, but they are crucial for memory efficiency
* To prevent some of them accidentally growing and us losing memory without realizing it, we're going to lock
* the sizes of all critical structures down.
*/
#if defined(__APPLE__)
#define ABISWITCH(x64, ms32, gcc32) (sizeof(void*) == 8 ? x64 : gcc32)
#elif defined(__i386__) && !defined(_MSC_VER)
#define ABISWITCH(x64, ms32, gcc32) (gcc32)
#else
// Android somehow uses a similar ABI to MSVC, *not* to iOS...
#define ABISWITCH(x64, ms32, gcc32) (sizeof(void*) == 8 ? x64 : ms32)
#endif
#if LUA_VECTOR_SIZE == 4
static_assert(sizeof(TValue) == ABISWITCH(24, 24, 24), "size mismatch for value");
static_assert(sizeof(LuaNode) == ABISWITCH(48, 48, 48), "size mismatch for table entry");
#else
static_assert(sizeof(TValue) == ABISWITCH(16, 16, 16), "size mismatch for value");
static_assert(sizeof(LuaNode) == ABISWITCH(32, 32, 32), "size mismatch for table entry");
#endif
static_assert(offsetof(TString, data) == ABISWITCH(24, 20, 20), "size mismatch for string header");
static_assert(offsetof(Udata, data) == ABISWITCH(16, 16, 12), "size mismatch for userdata header");
static_assert(sizeof(Table) == ABISWITCH(48, 32, 32), "size mismatch for table header");
const size_t kSizeClasses = LUA_SIZECLASSES;
const size_t kMaxSmallSize = 512;
const size_t kPageSize = 16 * 1024 - 24; // slightly under 16KB since that results in less fragmentation due to heap metadata
const size_t kBlockHeader = sizeof(double) > sizeof(void*) ? sizeof(double) : sizeof(void*); // suitable for aligning double & void* on all platforms
const size_t kGCOLinkOffset = (sizeof(GCheader) + sizeof(void*) - 1) & ~(sizeof(void*) - 1); // GCO pages contain freelist links after the GC header
struct SizeClassConfig
{
int sizeOfClass[kSizeClasses];
int8_t classForSize[kMaxSmallSize + 1];
int classCount = 0;
SizeClassConfig()
{
memset(sizeOfClass, 0, sizeof(sizeOfClass));
memset(classForSize, -1, sizeof(classForSize));
// we use a progressive size class scheme:
// - all size classes are aligned by 8b to satisfy pointer alignment requirements
// - we first allocate sizes classes in multiples of 8
// - after the first cutoff we allocate size classes in multiples of 16
// - after the second cutoff we allocate size classes in multiples of 32
// this balances internal fragmentation vs external fragmentation
for (int size = 8; size < 64; size += 8)
sizeOfClass[classCount++] = size;
for (int size = 64; size < 256; size += 16)
sizeOfClass[classCount++] = size;
for (int size = 256; size <= 512; size += 32)
sizeOfClass[classCount++] = size;
LUAU_ASSERT(size_t(classCount) <= kSizeClasses);
// fill the lookup table for all classes
for (int klass = 0; klass < classCount; ++klass)
classForSize[sizeOfClass[klass]] = int8_t(klass);
// fill the gaps in lookup table
for (int size = kMaxSmallSize - 1; size >= 0; --size)
if (classForSize[size] < 0)
classForSize[size] = classForSize[size + 1];
}
};
const SizeClassConfig kSizeClassConfig;
// size class for a block of size sz; returns -1 for size=0 (empty allocations take no space) and for sizes above kMaxSmallSize (served by frealloc directly)
#define sizeclass(sz) (size_t((sz)-1) < kMaxSmallSize ? kSizeClassConfig.classForSize[sz] : -1)
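// Editor's illustration of the rounding this macro performs, given the class
// progression built above (8..56 in steps of 8, 64..240 in steps of 16,
// 256..512 in steps of 32):
//   sizeclass(1)   -> the 8-byte class (rounds up)
//   sizeclass(65)  -> the 80-byte class (16-byte band)
//   sizeclass(512) -> the 512-byte class (largest small size)
//   sizeclass(513) -> -1 (too large; falls through to frealloc)
//   sizeclass(0)   -> -1 (empty allocations take no space)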
// metadata for a block is stored in the first pointer of the block
#define metadata(block) (*(void**)(block))
#define freegcolink(block) (*(void**)((char*)block + kGCOLinkOffset))
struct lua_Page
{
// list of pages with free blocks
lua_Page* prev;
lua_Page* next;
// list of all gco pages
lua_Page* gcolistprev;
lua_Page* gcolistnext;
int pageSize; // page size in bytes, including page header
int blockSize; // block size in bytes, including block header (for non-GCO)
void* freeList; // next free block in this page; linked with metadata()/freegcolink()
int freeNext; // next free block offset in this page, in bytes; when negative, freeList is used instead
int busyBlocks; // number of blocks allocated out of this page
union
{
char data[1];
double align1;
void* align2;
};
};
l_noret luaM_toobig(lua_State* L)
{
luaG_runerror(L, "memory allocation error: block too big");
}
static lua_Page* newpage(lua_State* L, lua_Page** gcopageset, int pageSize, int blockSize, int blockCount)
{
global_State* g = L->global;
LUAU_ASSERT(pageSize - int(offsetof(lua_Page, data)) >= blockSize * blockCount);
lua_Page* page = (lua_Page*)(*g->frealloc)(g->ud, NULL, 0, pageSize);
if (!page)
luaD_throw(L, LUA_ERRMEM);
ASAN_POISON_MEMORY_REGION(page->data, blockSize * blockCount);
// setup page header
page->prev = NULL;
page->next = NULL;
page->gcolistprev = NULL;
page->gcolistnext = NULL;
page->pageSize = pageSize;
page->blockSize = blockSize;
// note: we start with the last block in the page and move downward
// either order would work, but that way we don't need to store the block count in the page
// additionally, GC stores objects in singly linked lists, and this way the GC lists end up in increasing pointer order
page->freeList = NULL;
page->freeNext = (blockCount - 1) * blockSize;
page->busyBlocks = 0;
if (gcopageset)
{
page->gcolistnext = *gcopageset;
if (page->gcolistnext)
page->gcolistnext->gcolistprev = page;
*gcopageset = page;
}
return page;
}
static lua_Page* newclasspage(lua_State* L, lua_Page** freepageset, lua_Page** gcopageset, uint8_t sizeClass, bool storeMetadata)
{
int blockSize = kSizeClassConfig.sizeOfClass[sizeClass] + (storeMetadata ? kBlockHeader : 0);
int blockCount = (kPageSize - offsetof(lua_Page, data)) / blockSize;
lua_Page* page = newpage(L, gcopageset, kPageSize, blockSize, blockCount);
// prepend the page to the class's page freelist (which is empty, because we only ever allocate a new page when it is!)
LUAU_ASSERT(!freepageset[sizeClass]);
freepageset[sizeClass] = page;
return page;
}
static void freepage(lua_State* L, lua_Page** gcopageset, lua_Page* page)
{
global_State* g = L->global;
if (gcopageset)
{
// remove page from alllist
if (page->gcolistnext)
page->gcolistnext->gcolistprev = page->gcolistprev;
if (page->gcolistprev)
page->gcolistprev->gcolistnext = page->gcolistnext;
else if (*gcopageset == page)
*gcopageset = page->gcolistnext;
}
// so long
(*g->frealloc)(g->ud, page, page->pageSize, 0);
}
static void freeclasspage(lua_State* L, lua_Page** freepageset, lua_Page** gcopageset, lua_Page* page, uint8_t sizeClass)
{
// remove page from freelist
if (page->next)
page->next->prev = page->prev;
if (page->prev)
page->prev->next = page->next;
else if (freepageset[sizeClass] == page)
freepageset[sizeClass] = page->next;
freepage(L, gcopageset, page);
}
static void* newblock(lua_State* L, int sizeClass)
{
global_State* g = L->global;
lua_Page* page = g->freepages[sizeClass];
// slow path: no page in the freelist, allocate a new one
if (!page)
page = newclasspage(L, g->freepages, NULL, sizeClass, true);
LUAU_ASSERT(!page->prev);
LUAU_ASSERT(page->freeList || page->freeNext >= 0);
LUAU_ASSERT(size_t(page->blockSize) == kSizeClassConfig.sizeOfClass[sizeClass] + kBlockHeader);
void* block;
if (page->freeNext >= 0)
{
block = &page->data + page->freeNext;
ASAN_UNPOISON_MEMORY_REGION(block, page->blockSize);
page->freeNext -= page->blockSize;
page->busyBlocks++;
}
else
{
block = page->freeList;
ASAN_UNPOISON_MEMORY_REGION(block, page->blockSize);
page->freeList = metadata(block);
page->busyBlocks++;
}
// the first word in a block points back to the page
metadata(block) = page;
// if we allocate the last block out of a page, we need to remove it from free list
if (!page->freeList && page->freeNext < 0)
{
g->freepages[sizeClass] = page->next;
if (page->next)
page->next->prev = NULL;
page->next = NULL;
}
// the user data is right after the metadata
return (char*)block + kBlockHeader;
}
static void* newgcoblock(lua_State* L, int sizeClass)
{
global_State* g = L->global;
lua_Page* page = g->freegcopages[sizeClass];
// slow path: no page in the freelist, allocate a new one
if (!page)
page = newclasspage(L, g->freegcopages, &g->allgcopages, sizeClass, false);
LUAU_ASSERT(!page->prev);
LUAU_ASSERT(page->freeList || page->freeNext >= 0);
LUAU_ASSERT(page->blockSize == kSizeClassConfig.sizeOfClass[sizeClass]);
void* block;
if (page->freeNext >= 0)
{
block = &page->data + page->freeNext;
ASAN_UNPOISON_MEMORY_REGION(block, page->blockSize);
page->freeNext -= page->blockSize;
page->busyBlocks++;
}
else
{
block = page->freeList;
ASAN_UNPOISON_MEMORY_REGION((char*)block + sizeof(GCheader), page->blockSize - sizeof(GCheader));
// when separate block metadata is not used, free list link is stored inside the block data itself
page->freeList = freegcolink(block);
page->busyBlocks++;
}
// if we allocate the last block out of a page, we need to remove it from free list
if (!page->freeList && page->freeNext < 0)
{
g->freegcopages[sizeClass] = page->next;
if (page->next)
page->next->prev = NULL;
page->next = NULL;
}
return block;
}
static void freeblock(lua_State* L, int sizeClass, void* block)
{
global_State* g = L->global;
// the user data is right after the metadata
LUAU_ASSERT(block);
block = (char*)block - kBlockHeader;
lua_Page* page = (lua_Page*)metadata(block);
LUAU_ASSERT(page && page->busyBlocks > 0);
LUAU_ASSERT(size_t(page->blockSize) == kSizeClassConfig.sizeOfClass[sizeClass] + kBlockHeader);
LUAU_ASSERT(block >= page->data && block < (char*)page + page->pageSize);
// if the page wasn't in the page free list, it should be now since it got a block!
if (!page->freeList && page->freeNext < 0)
{
LUAU_ASSERT(!page->prev);
LUAU_ASSERT(!page->next);
page->next = g->freepages[sizeClass];
if (page->next)
page->next->prev = page;
g->freepages[sizeClass] = page;
}
// add the block to the free list inside the page
metadata(block) = page->freeList;
page->freeList = block;
ASAN_POISON_MEMORY_REGION(block, page->blockSize);
page->busyBlocks--;
// if it's the last block in the page, we don't need the page
if (page->busyBlocks == 0)
freeclasspage(L, g->freepages, NULL, page, sizeClass);
}
static void freegcoblock(lua_State* L, int sizeClass, void* block, lua_Page* page)
{
LUAU_ASSERT(page && page->busyBlocks > 0);
LUAU_ASSERT(page->blockSize == kSizeClassConfig.sizeOfClass[sizeClass]);
LUAU_ASSERT(block >= page->data && block < (char*)page + page->pageSize);
global_State* g = L->global;
// if the page wasn't in the page free list, it should be now since it got a block!
if (!page->freeList && page->freeNext < 0)
{
LUAU_ASSERT(!page->prev);
LUAU_ASSERT(!page->next);
page->next = g->freegcopages[sizeClass];
if (page->next)
page->next->prev = page;
g->freegcopages[sizeClass] = page;
}
// when separate block metadata is not used, free list link is stored inside the block data itself
freegcolink(block) = page->freeList;
page->freeList = block;
ASAN_POISON_MEMORY_REGION((char*)block + sizeof(GCheader), page->blockSize - sizeof(GCheader));
page->busyBlocks--;
// if it's the last block in the page, we don't need the page
if (page->busyBlocks == 0)
freeclasspage(L, g->freegcopages, &g->allgcopages, page, sizeClass);
}
void* luaM_new_(lua_State* L, size_t nsize, uint8_t memcat)
{
global_State* g = L->global;
int nclass = sizeclass(nsize);
void* block = nclass >= 0 ? newblock(L, nclass) : (*g->frealloc)(g->ud, NULL, 0, nsize);
if (block == NULL && nsize > 0)
luaD_throw(L, LUA_ERRMEM);
g->totalbytes += nsize;
g->memcatbytes[memcat] += nsize;
return block;
}
GCObject* luaM_newgco_(lua_State* L, size_t nsize, uint8_t memcat)
{
// we need to accommodate space for the free-block link (freegcolink)
LUAU_ASSERT(nsize >= kGCOLinkOffset + sizeof(void*));
global_State* g = L->global;
int nclass = sizeclass(nsize);
void* block = NULL;
if (nclass >= 0)
{
block = newgcoblock(L, nclass);
}
else
{
lua_Page* page = newpage(L, &g->allgcopages, offsetof(lua_Page, data) + int(nsize), int(nsize), 1);
block = &page->data;
ASAN_UNPOISON_MEMORY_REGION(block, page->blockSize);
page->freeNext -= page->blockSize;
page->busyBlocks++;
}
if (block == NULL && nsize > 0)
luaD_throw(L, LUA_ERRMEM);
g->totalbytes += nsize;
g->memcatbytes[memcat] += nsize;
return (GCObject*)block;
}
void luaM_free_(lua_State* L, void* block, size_t osize, uint8_t memcat)
{
global_State* g = L->global;
LUAU_ASSERT((osize == 0) == (block == NULL));
int oclass = sizeclass(osize);
if (oclass >= 0)
freeblock(L, oclass, block);
else
(*g->frealloc)(g->ud, block, osize, 0);
g->totalbytes -= osize;
g->memcatbytes[memcat] -= osize;
}
void luaM_freegco_(lua_State* L, GCObject* block, size_t osize, uint8_t memcat, lua_Page* page)
{
global_State* g = L->global;
LUAU_ASSERT((osize == 0) == (block == NULL));
int oclass = sizeclass(osize);
if (oclass >= 0)
{
block->gch.tt = LUA_TNIL;
freegcoblock(L, oclass, block, page);
}
else
{
LUAU_ASSERT(page->busyBlocks == 1);
LUAU_ASSERT(size_t(page->blockSize) == osize);
LUAU_ASSERT((void*)block == page->data);
freepage(L, &g->allgcopages, page);
}
g->totalbytes -= osize;
g->memcatbytes[memcat] -= osize;
}
void* luaM_realloc_(lua_State* L, void* block, size_t osize, size_t nsize, uint8_t memcat)
{
global_State* g = L->global;
LUAU_ASSERT((osize == 0) == (block == NULL));
int nclass = sizeclass(nsize);
int oclass = sizeclass(osize);
void* result;
// if either block needs to be allocated using a block allocator, we can't use realloc directly
if (nclass >= 0 || oclass >= 0)
{
result = nclass >= 0 ? newblock(L, nclass) : (*g->frealloc)(g->ud, NULL, 0, nsize);
if (result == NULL && nsize > 0)
luaD_throw(L, LUA_ERRMEM);
if (osize > 0 && nsize > 0)
memcpy(result, block, osize < nsize ? osize : nsize);
if (oclass >= 0)
freeblock(L, oclass, block);
else
(*g->frealloc)(g->ud, block, osize, 0);
}
else
{
result = (*g->frealloc)(g->ud, block, osize, nsize);
if (result == NULL && nsize > 0)
luaD_throw(L, LUA_ERRMEM);
}
LUAU_ASSERT((nsize == 0) == (result == NULL));
g->totalbytes = (g->totalbytes - osize) + nsize;
g->memcatbytes[memcat] += nsize - osize;
return result;
}
void luaM_getpagewalkinfo(lua_Page* page, char** start, char** end, int* busyBlocks, int* blockSize)
{
int blockCount = (page->pageSize - offsetof(lua_Page, data)) / page->blockSize;
LUAU_ASSERT(page->freeNext >= -page->blockSize && page->freeNext <= (blockCount - 1) * page->blockSize);
char* data = page->data; // silences ubsan when indexing page->data
*start = data + page->freeNext + page->blockSize;
*end = data + blockCount * page->blockSize;
*busyBlocks = page->busyBlocks;
*blockSize = page->blockSize;
}
lua_Page* luaM_getnextgcopage(lua_Page* page)
{
return page->gcolistnext;
}
void luaM_visitpage(lua_Page* page, void* context, bool (*visitor)(void* context, lua_Page* page, GCObject* gco))
{
char* start;
char* end;
int busyBlocks;
int blockSize;
luaM_getpagewalkinfo(page, &start, &end, &busyBlocks, &blockSize);
for (char* pos = start; pos != end; pos += blockSize)
{
GCObject* gco = (GCObject*)pos;
// skip memory blocks that are already freed
if (gco->gch.tt == LUA_TNIL)
continue;
// when true is returned it means that the element was deleted
if (visitor(context, page, gco))
{
LUAU_ASSERT(busyBlocks > 0);
// if the last block was removed, page would be removed as well
if (--busyBlocks == 0)
break;
}
}
}
void luaM_visitgco(lua_State* L, void* context, bool (*visitor)(void* context, lua_Page* page, GCObject* gco))
{
global_State* g = L->global;
for (lua_Page* curr = g->allgcopages; curr;)
{
lua_Page* next = curr->gcolistnext; // block visit might destroy the page
luaM_visitpage(curr, context, visitor);
curr = next;
}
}

View File

@ -1,32 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lua.h"
struct lua_Page;
union GCObject;
#define luaM_newgco(L, t, size, memcat) cast_to(t*, luaM_newgco_(L, size, memcat))
#define luaM_freegco(L, p, size, memcat, page) luaM_freegco_(L, obj2gco(p), size, memcat, page)
#define luaM_arraysize_(L, n, e) ((cast_to(size_t, (n)) <= SIZE_MAX / (e)) ? (n) * (e) : (luaM_toobig(L), SIZE_MAX))
#define luaM_newarray(L, n, t, memcat) cast_to(t*, luaM_new_(L, luaM_arraysize_(L, n, sizeof(t)), memcat))
#define luaM_freearray(L, b, n, t, memcat) luaM_free_(L, (b), (n) * sizeof(t), memcat)
#define luaM_reallocarray(L, v, oldn, n, t, memcat) \
((v) = cast_to(t*, luaM_realloc_(L, v, (oldn) * sizeof(t), luaM_arraysize_(L, n, sizeof(t)), memcat)))
LUAI_FUNC void* luaM_new_(lua_State* L, size_t nsize, uint8_t memcat);
LUAI_FUNC GCObject* luaM_newgco_(lua_State* L, size_t nsize, uint8_t memcat);
LUAI_FUNC void luaM_free_(lua_State* L, void* block, size_t osize, uint8_t memcat);
LUAI_FUNC void luaM_freegco_(lua_State* L, GCObject* block, size_t osize, uint8_t memcat, lua_Page* page);
LUAI_FUNC void* luaM_realloc_(lua_State* L, void* block, size_t osize, size_t nsize, uint8_t memcat);
LUAI_FUNC l_noret luaM_toobig(lua_State* L);
LUAI_FUNC void luaM_getpagewalkinfo(lua_Page* page, char** start, char** end, int* busyBlocks, int* blockSize);
LUAI_FUNC lua_Page* luaM_getnextgcopage(lua_Page* page);
LUAI_FUNC void luaM_visitpage(lua_Page* page, void* context, bool (*visitor)(void* context, lua_Page* page, GCObject* gco));
LUAI_FUNC void luaM_visitgco(lua_State* L, void* context, bool (*visitor)(void* context, lua_Page* page, GCObject* gco));

View File

@ -1,366 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "luaconf.h"
#include "lnumutils.h"
#include "lcommon.h"
#include <string.h>
#ifdef _MSC_VER
#include <intrin.h>
#endif
// This work is based on:
// Raffaello Giulietti. The Schubfach way to render doubles. 2021
// https://drive.google.com/file/d/1IEeATSVnEE6TkrHlCYNY2GjaraBjOT4f/edit
// The code uses the notation from the paper for local variables where appropriate, and refers to paper sections/figures/results.
// 9.8.2. Precomputed table for 128-bit overestimates of powers of 10 (see figure 3 for table bounds)
// To avoid storing 616 128-bit numbers directly we use a technique inspired by Dragonbox implementation and store 16 consecutive
// powers using a 128-bit baseline and a bitvector with 1-bit scale and 3-bit offset for the delta between each entry and base*5^k
static const int kPow10TableMin = -292;
static const int kPow10TableMax = 324;
// clang-format off
static const uint64_t kPow5Table[16] = {
0x8000000000000000, 0xa000000000000000, 0xc800000000000000, 0xfa00000000000000, 0x9c40000000000000, 0xc350000000000000,
0xf424000000000000, 0x9896800000000000, 0xbebc200000000000, 0xee6b280000000000, 0x9502f90000000000, 0xba43b74000000000,
0xe8d4a51000000000, 0x9184e72a00000000, 0xb5e620f480000000, 0xe35fa931a0000000,
};
static const uint64_t kPow10Table[(kPow10TableMax - kPow10TableMin + 1 + 15) / 16][3] = {
{0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b, 0x333443443333443b}, {0x8dd01fad907ffc3b, 0xae3da7d97f6792e4, 0xbbb3ab3cb3ba3cbc},
{0x9d71ac8fada6c9b5, 0x6f773fc3603db4aa, 0x4ba4bc4bb4bb4bcc}, {0xaecc49914078536d, 0x58fae9f773886e19, 0x3ba3bc33b43b43bb},
{0xc21094364dfb5636, 0x985915fc12f542e5, 0x33b43b43a33b33cb}, {0xd77485cb25823ac7, 0x7d633293366b828c, 0x34b44c444343443c},
{0xef340a98172aace4, 0x86fb897116c87c35, 0x333343333343334b}, {0x84c8d4dfd2c63f3b, 0x29ecd9f40041e074, 0xccaccbbcbcbb4bbc},
{0x936b9fcebb25c995, 0xcab10dd900beec35, 0x3ab3ab3ab3bb3bbb}, {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebbb, 0x4cc3dc4db4db4dbb},
{0xb5b5ada8aaff80b8, 0x0d819992132456bb, 0x33b33a34c33b34ab}, {0xc9bcff6034c13052, 0xfc89b393dd02f0b6, 0x33c33b44b43c34bc},
{0xdff9772470297ebd, 0x59787e2b93bc56f8, 0x43b444444443434c}, {0xf8a95fcf88747d94, 0x75a44c6397ce912b, 0x443334343443343b},
{0x8a08f0f8bf0f156b, 0x1b8e9ecb641b5900, 0xbbabab3aa3ab4ccc}, {0x993fe2c6d07b7fab, 0xe546a8038efe402a, 0x4cb4bc4db4db4bcc},
{0xaa242499697392d2, 0xdde50bd1d5d0b9ea, 0x3ba3ba3bb33b33bc}, {0xbce5086492111aea, 0x88f4bb1ca6bcf585, 0x44b44c44c44c43cb},
{0xd1b71758e219652b, 0xd3c36113404ea4a9, 0x44c44c44c444443b}, {0xe8d4a51000000000, 0x0000000000000000, 0x444444444444444c},
{0x813f3978f8940984, 0x4000000000000000, 0xcccccccccccccccc}, {0x8f7e32ce7bea5c6f, 0xe4820023a2000000, 0xbba3bc4cc4cc4ccc},
{0x9f4f2726179a2245, 0x01d762422c946591, 0x4aa3bb3aa3ba3bab}, {0xb0de65388cc8ada8, 0x3b25a55f43294bcc, 0x3ca33b33b44b43bc},
{0xc45d1df942711d9a, 0x3ba5d0bd324f8395, 0x44c44c34c44b44cb}, {0xda01ee641a708de9, 0xe80e6f4820cc9496, 0x33b33b343333333c},
{0xf209787bb47d6b84, 0xc0678c5dbd23a49b, 0x443444444443443b}, {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b3, 0xdbccbcccb4cb3bbb},
{0x952ab45cfa97a0b2, 0xdd945a747bf26184, 0x3bc4bb4ab3ca3cbc}, {0xa59bc234db398c25, 0x43fab9837e699096, 0x3bb3ac3ab3bb33ac},
{0xb7dcbf5354e9bece, 0x0c11ed6d538aeb30, 0x33b43b43b34c34dc}, {0xcc20ce9bd35c78a5, 0x31ec038df7b441f5, 0x34c44c43c44b44cb},
{0xe2a0b5dc971f303a, 0x2e44ae64840fd61e, 0x333333333333333c}, {0xfb9b7cd9a4a7443c, 0x169840ef017da3b2, 0x433344443333344c},
{0x8bab8eefb6409c1a, 0x1ad089b6c2f7548f, 0xdcbdcc3cc4cc4bcb}, {0x9b10a4e5e9913128, 0xca7cf2b4191c8327, 0x3ab3cb3bc3bb4bbb},
{0xac2820d9623bf429, 0x546345fa9fbdcd45, 0x3bb3cc43c43c43cb}, {0xbf21e44003acdd2c, 0xe0470a63e6bd56c4, 0x44b34a43b44c44bc},
{0xd433179d9c8cb841, 0x5fa60692a46151ec, 0x43a33a33a333333c},
};
// clang-format on
static const char kDigitTable[] = "0001020304050607080910111213141516171819202122232425262728293031323334353637383940414243444546474849"
"5051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899";
// x*y => 128-bit product (lo+hi)
inline uint64_t mul128(uint64_t x, uint64_t y, uint64_t* hi)
{
#if defined(_MSC_VER) && defined(_M_X64)
return _umul128(x, y, hi);
#elif defined(__SIZEOF_INT128__)
unsigned __int128 r = x;
r *= y;
*hi = uint64_t(r >> 64);
return uint64_t(r);
#else
uint32_t x0 = uint32_t(x), x1 = uint32_t(x >> 32);
uint32_t y0 = uint32_t(y), y1 = uint32_t(y >> 32);
uint64_t p11 = uint64_t(x1) * y1, p01 = uint64_t(x0) * y1;
uint64_t p10 = uint64_t(x1) * y0, p00 = uint64_t(x0) * y0;
uint64_t mid = p10 + (p00 >> 32) + uint32_t(p01);
uint64_t r0 = (mid << 32) | uint32_t(p00);
uint64_t r1 = p11 + (mid >> 32) + (p01 >> 32);
*hi = r1;
return r0;
#endif
}
// (x*y)>>64 => 128-bit product (lo+hi)
inline uint64_t mul192hi(uint64_t xhi, uint64_t xlo, uint64_t y, uint64_t* hi)
{
uint64_t z2;
uint64_t z1 = mul128(xhi, y, &z2);
uint64_t z1c;
uint64_t z0 = mul128(xlo, y, &z1c);
(void)z0;
z1 += z1c;
z2 += (z1 < z1c);
*hi = z2;
return z1;
}
// 9.3. Rounding to odd (+ figure 8 + result 23)
inline uint64_t roundodd(uint64_t ghi, uint64_t glo, uint64_t cp)
{
uint64_t xhi;
uint64_t xlo = mul128(glo, cp, &xhi);
(void)xlo;
uint64_t yhi;
uint64_t ylo = mul128(ghi, cp, &yhi);
uint64_t z = ylo + xhi;
return (yhi + (z < xhi)) | (z > 1);
}
struct Decimal
{
uint64_t s;
int k;
};
static Decimal schubfach(int exponent, uint64_t fraction)
{
// Extract c & q such that c*2^q == |v|
uint64_t c = fraction;
int q = exponent - 1023 - 51;
if (exponent != 0) // normal numbers have implicit leading 1
{
c |= (1ull << 52);
q--;
}
// 8.3. Fast path for integers
if (unsigned(-q) < 53 && (c & ((1ull << (-q)) - 1)) == 0)
return {c >> (-q), 0};
// 5. Rounding interval
int irr = (c == (1ull << 52) && q != -1074); // Qmin
int out = int(c & 1);
// 9.8.1. Boundaries for c
uint64_t cbl = 4 * c - 2 + irr;
uint64_t cb = 4 * c;
uint64_t cbr = 4 * c + 2;
// 9.1. Computing k and h
const int Q = 20;
const int C = 315652; // floor(2^Q * log10(2))
const int A = -131008; // floor(2^Q * log10(3/4))
const int C2 = 3483294; // floor(2^Q * log2(10))
int k = (q * C + (irr ? A : 0)) >> Q;
int h = q + ((-k * C2) >> Q) + 1; // see (9) in 9.9
// 9.8.2. Overestimates of powers of 10
// Recover 10^-k fraction using compact tables generated by tools/numutils.py
// The 128-bit fraction is encoded as 128-bit baseline * power-of-5 * scale + offset
LUAU_ASSERT(-k >= kPow10TableMin && -k <= kPow10TableMax);
int gtoff = -k - kPow10TableMin;
const uint64_t* gt = kPow10Table[gtoff >> 4];
uint64_t ghi;
uint64_t glo = mul192hi(gt[0], gt[1], kPow5Table[gtoff & 15], &ghi);
// Apply 1-bit scale + 3-bit offset; note, offset is intentionally applied without carry, numutils.py validates that this is sufficient
int gterr = (gt[2] >> ((gtoff & 15) * 4)) & 15;
int gtscale = gterr >> 3;
ghi <<= gtscale;
ghi += (glo >> 63) & gtscale;
glo <<= gtscale;
glo -= (gterr & 7) - 4;
// 9.9. Boundaries for v
uint64_t vbl = roundodd(ghi, glo, cbl << h);
uint64_t vb = roundodd(ghi, glo, cb << h);
uint64_t vbr = roundodd(ghi, glo, cbr << h);
// Main algorithm; see figure 7 + figure 9
uint64_t s = vb / 4;
if (s >= 10)
{
uint64_t sp = s / 10;
bool upin = vbl + out <= 40 * sp;
bool wpin = vbr >= 40 * sp + 40 + out;
if (upin != wpin)
return {sp + wpin, k + 1};
}
// Figure 7 contains the algorithm to select between u (s) and w (s+1)
// rup computes the last 4 conditions in that algorithm
// rup is only used when uin == win, but since these branches predict poorly we use branchless selects
bool uin = vbl + out <= 4 * s;
bool win = 4 * s + 4 + out <= vbr;
bool rup = vb >= 4 * s + 2 + 1 - (s & 1);
return {s + (uin != win ? win : rup), k};
}
static char* printspecial(char* buf, int sign, uint64_t fraction)
{
if (fraction == 0)
{
memcpy(buf, ("-inf") + (1 - sign), 4);
return buf + 3 + sign;
}
else
{
memcpy(buf, "nan", 4);
return buf + 3;
}
}
static char* printunsignedrev(char* end, uint64_t num)
{
while (num >= 10000)
{
unsigned int tail = unsigned(num % 10000);
memcpy(end - 4, &kDigitTable[int(tail / 100) * 2], 2);
memcpy(end - 2, &kDigitTable[int(tail % 100) * 2], 2);
num /= 10000;
end -= 4;
}
unsigned int rest = unsigned(num);
while (rest >= 10)
{
memcpy(end - 2, &kDigitTable[int(rest % 100) * 2], 2);
rest /= 100;
end -= 2;
}
if (rest)
{
end[-1] = '0' + int(rest);
end -= 1;
}
return end;
}
static char* printexp(char* buf, int num)
{
*buf++ = 'e';
*buf++ = num < 0 ? '-' : '+';
int v = num < 0 ? -num : num;
if (v >= 100)
{
*buf++ = '0' + (v / 100);
v %= 100;
}
memcpy(buf, &kDigitTable[v * 2], 2);
return buf + 2;
}
inline char* trimzero(char* end)
{
while (end[-1] == '0')
end--;
return end;
}
// We use fixed-length memcpy/memset since they lower to fast SIMD+scalar writes; the target buffers should have padding space
#define fastmemcpy(dst, src, size, sizefast) check_exp((size) <= sizefast, memcpy(dst, src, sizefast))
#define fastmemset(dst, val, size, sizefast) check_exp((size) <= sizefast, memset(dst, val, sizefast))
char* luai_num2str(char* buf, double n)
{
// IEEE-754
union
{
double v;
uint64_t bits;
} v = {n};
int sign = int(v.bits >> 63);
int exponent = int(v.bits >> 52) & 2047;
uint64_t fraction = v.bits & ((1ull << 52) - 1);
// specials
if (LUAU_UNLIKELY(exponent == 0x7ff))
return printspecial(buf, sign, fraction);
// sign bit
*buf = '-';
buf += sign;
// zero
if (exponent == 0 && fraction == 0)
{
buf[0] = '0';
return buf + 1;
}
// convert binary to decimal using Schubfach
Decimal d = schubfach(exponent, fraction);
LUAU_ASSERT(d.s < uint64_t(1e17));
// print the decimal to a temporary buffer; we'll need to insert the decimal point and figure out the format
char decbuf[40];
char* decend = decbuf + 20; // significand needs at most 17 digits; the rest of the buffer may be copied using fixed length memcpy
char* dec = printunsignedrev(decend, d.s);
int declen = int(decend - dec);
LUAU_ASSERT(declen <= 17);
int dot = declen + d.k;
// the limits are somewhat arbitrary but changing them may require changing fastmemset/fastmemcpy sizes below
if (dot >= -5 && dot <= 21)
{
// fixed point format
if (dot <= 0)
{
buf[0] = '0';
buf[1] = '.';
fastmemset(buf + 2, '0', -dot, 5);
fastmemcpy(buf + 2 + (-dot), dec, declen, 17);
return trimzero(buf + 2 + (-dot) + declen);
}
else if (dot == declen)
{
// no dot
fastmemcpy(buf, dec, dot, 17);
return buf + dot;
}
else if (dot < declen)
{
// dot in the middle
fastmemcpy(buf, dec, dot, 16);
buf[dot] = '.';
fastmemcpy(buf + dot + 1, dec + dot, declen - dot, 16);
return trimzero(buf + declen + 1);
}
else
{
// no dot, zero padding
fastmemcpy(buf, dec, declen, 17);
fastmemset(buf + declen, '0', dot - declen, 8);
return buf + dot;
}
}
else
{
// scientific format
buf[0] = dec[0];
buf[1] = '.';
fastmemcpy(buf + 2, dec + 1, declen - 1, 16);
char* exp = trimzero(buf + declen + 1);
return printexp(exp, dot - 1);
}
}
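A sketch of the calling convention, assuming the LUAI_MAXNUM2STR buffer size declared in lnumutils.h below: the function writes the shortest round-trippable decimal form and returns a pointer one past the last character, without NUL-terminating.
char buf[LUAI_MAXNUM2STR];
char* end = luai_num2str(buf, 0.1);
*end = '\0'; // buf now holds "0.1"; Schubfach guarantees the output parses back to the same double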

View File

@ -1,62 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include <math.h>
#define luai_numadd(a, b) ((a) + (b))
#define luai_numsub(a, b) ((a) - (b))
#define luai_nummul(a, b) ((a) * (b))
#define luai_numdiv(a, b) ((a) / (b))
#define luai_numpow(a, b) (pow(a, b))
#define luai_numunm(a) (-(a))
#define luai_numisnan(a) ((a) != (a))
#define luai_numeq(a, b) ((a) == (b))
#define luai_numlt(a, b) ((a) < (b))
#define luai_numle(a, b) ((a) <= (b))
inline bool luai_veceq(const float* a, const float* b)
{
#if LUA_VECTOR_SIZE == 4
return a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3];
#else
return a[0] == b[0] && a[1] == b[1] && a[2] == b[2];
#endif
}
inline bool luai_vecisnan(const float* a)
{
#if LUA_VECTOR_SIZE == 4
return a[0] != a[0] || a[1] != a[1] || a[2] != a[2] || a[3] != a[3];
#else
return a[0] != a[0] || a[1] != a[1] || a[2] != a[2];
#endif
}
LUAU_FASTMATH_BEGIN
inline double luai_nummod(double a, double b)
{
return a - floor(a / b) * b;
}
LUAU_FASTMATH_END
#define luai_num2int(i, d) ((i) = (int)(d))
// On MSVC in 32-bit, double to unsigned cast compiles into a call to __dtoui3, so we invoke x87->int64 conversion path manually
#if defined(_MSC_VER) && defined(_M_IX86)
#define luai_num2unsigned(i, n) \
{ \
__int64 l; \
__asm { __asm fld n __asm fistp l} \
; \
i = (unsigned int)l; \
}
#else
#define luai_num2unsigned(i, n) ((i) = (unsigned)(long long)(n))
#endif
#define LUAI_MAXNUM2STR 48
LUAI_FUNC char* luai_num2str(char* buf, double n);
#define luai_str2num(s, p) strtod((s), (p))
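luai_nummod above implements floored modulo, matching the semantics of Lua's % operator rather than C's fmod; a quick sketch of the difference:
double a = luai_nummod(-5.0, 3.0); // 1.0: -5 - floor(-5/3)*3 = -5 - (-2)*3
double b = fmod(-5.0, 3.0);        // -2.0: fmod truncates toward zero, keeping the dividend's sign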

View File

@ -1,154 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "lgc.h"
#include "ldo.h"
#include "lnumutils.h"
#include <ctype.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
const TValue luaO_nilobject_ = {{NULL}, {0}, LUA_TNIL};
int luaO_log2(unsigned int x)
{
static const uint8_t log_2[256] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8};
int l = -1;
while (x >= 256)
{
l += 8;
x >>= 8;
}
return l + log_2[x];
}
int luaO_rawequalObj(const TValue* t1, const TValue* t2)
{
if (ttype(t1) != ttype(t2))
return 0;
else
switch (ttype(t1))
{
case LUA_TNIL:
return 1;
case LUA_TNUMBER:
return luai_numeq(nvalue(t1), nvalue(t2));
case LUA_TVECTOR:
return luai_veceq(vvalue(t1), vvalue(t2));
case LUA_TBOOLEAN:
return bvalue(t1) == bvalue(t2); // boolean true must be 1 !!
case LUA_TLIGHTUSERDATA:
return pvalue(t1) == pvalue(t2);
default:
LUAU_ASSERT(iscollectable(t1));
return gcvalue(t1) == gcvalue(t2);
}
}
int luaO_rawequalKey(const TKey* t1, const TValue* t2)
{
if (ttype(t1) != ttype(t2))
return 0;
else
switch (ttype(t1))
{
case LUA_TNIL:
return 1;
case LUA_TNUMBER:
return luai_numeq(nvalue(t1), nvalue(t2));
case LUA_TVECTOR:
return luai_veceq(vvalue(t1), vvalue(t2));
case LUA_TBOOLEAN:
return bvalue(t1) == bvalue(t2); // boolean true must be 1 !!
case LUA_TLIGHTUSERDATA:
return pvalue(t1) == pvalue(t2);
default:
LUAU_ASSERT(iscollectable(t1));
return gcvalue(t1) == gcvalue(t2);
}
}
int luaO_str2d(const char* s, double* result)
{
char* endptr;
*result = luai_str2num(s, &endptr);
if (endptr == s)
return 0; // conversion failed
if (*endptr == 'x' || *endptr == 'X') // maybe a hexadecimal constant?
*result = cast_num(strtoul(s, &endptr, 16));
if (*endptr == '\0')
return 1; // most common case
while (isspace(cast_to(unsigned char, *endptr)))
endptr++;
if (*endptr != '\0')
return 0; // invalid trailing characters?
return 1;
}
const char* luaO_pushvfstring(lua_State* L, const char* fmt, va_list argp)
{
char result[LUA_BUFFERSIZE];
vsnprintf(result, sizeof(result), fmt, argp);
setsvalue(L, L->top, luaS_new(L, result));
incr_top(L);
return svalue(L->top - 1);
}
const char* luaO_pushfstring(lua_State* L, const char* fmt, ...)
{
const char* msg;
va_list argp;
va_start(argp, fmt);
msg = luaO_pushvfstring(L, fmt, argp);
va_end(argp);
return msg;
}
const char* luaO_chunkid(char* buf, size_t buflen, const char* source, size_t srclen)
{
if (*source == '=')
{
if (srclen <= buflen)
return source + 1;
// truncate the part after =
memcpy(buf, source + 1, buflen - 1);
buf[buflen - 1] = '\0';
}
else if (*source == '@')
{
if (srclen <= buflen)
return source + 1;
// truncate the part after @
memcpy(buf, "...", 3);
memcpy(buf + 3, source + srclen - (buflen - 4), buflen - 4);
buf[buflen - 1] = '\0';
}
else
{ // buf = [string "string"]
size_t len = strcspn(source, "\n\r"); // stop at first newline
buflen -= sizeof("[string \"...\"]");
if (len > buflen)
len = buflen;
strcpy(buf, "[string \"");
if (source[len] != '\0')
{ // must truncate?
strncat(buf, source, len);
strcat(buf, "...");
}
else
strcat(buf, source);
strcat(buf, "\"]");
}
return buf;
}
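A sketch of the three source-name shapes luaO_chunkid handles; the inputs and the LUA_IDSIZE buffer size are hypothetical:
char buf[LUA_IDSIZE];
// '=' sources are returned as-is (minus the '='), truncated at the end only when too long:
//   luaO_chunkid(buf, sizeof(buf), "=stdin", 6) -> "stdin"
// '@' sources are file names, truncated at the front so the tail of the path survives:
//   luaO_chunkid(buf, sizeof(buf), "@/very/long/path.lua", 20) -> ".../long/path.lua" (when it doesn't fit)
// anything else is quoted source text, cut at the first newline:
//   luaO_chunkid(buf, sizeof(buf), "print('hi')\nrest", 16) -> "[string \"print('hi')...\"]"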

View File

@ -1,464 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lua.h"
#include "lcommon.h"
/*
** Union of all collectible objects
*/
typedef union GCObject GCObject;
/*
** Common Header for all collectible objects (in macro form, to be included in other objects)
*/
// clang-format off
#define CommonHeader \
uint8_t tt; uint8_t marked; uint8_t memcat
// clang-format on
/*
** Common header in struct form
*/
typedef struct GCheader
{
CommonHeader;
} GCheader;
/*
** Union of all Lua values
*/
typedef union
{
GCObject* gc;
void* p;
double n;
int b;
float v[2]; // v[0], v[1] live here; v[2] lives in TValue::extra
} Value;
/*
** Tagged Values
*/
typedef struct lua_TValue
{
Value value;
int extra[LUA_EXTRA_SIZE];
int tt;
} TValue;
// Macros to test type
#define ttisnil(o) (ttype(o) == LUA_TNIL)
#define ttisnumber(o) (ttype(o) == LUA_TNUMBER)
#define ttisstring(o) (ttype(o) == LUA_TSTRING)
#define ttistable(o) (ttype(o) == LUA_TTABLE)
#define ttisfunction(o) (ttype(o) == LUA_TFUNCTION)
#define ttisboolean(o) (ttype(o) == LUA_TBOOLEAN)
#define ttisuserdata(o) (ttype(o) == LUA_TUSERDATA)
#define ttisthread(o) (ttype(o) == LUA_TTHREAD)
#define ttislightuserdata(o) (ttype(o) == LUA_TLIGHTUSERDATA)
#define ttisvector(o) (ttype(o) == LUA_TVECTOR)
#define ttisupval(o) (ttype(o) == LUA_TUPVAL)
// Macros to access values
#define ttype(o) ((o)->tt)
#define gcvalue(o) check_exp(iscollectable(o), (o)->value.gc)
#define pvalue(o) check_exp(ttislightuserdata(o), (o)->value.p)
#define nvalue(o) check_exp(ttisnumber(o), (o)->value.n)
#define vvalue(o) check_exp(ttisvector(o), (o)->value.v)
#define tsvalue(o) check_exp(ttisstring(o), &(o)->value.gc->ts)
#define uvalue(o) check_exp(ttisuserdata(o), &(o)->value.gc->u)
#define clvalue(o) check_exp(ttisfunction(o), &(o)->value.gc->cl)
#define hvalue(o) check_exp(ttistable(o), &(o)->value.gc->h)
#define bvalue(o) check_exp(ttisboolean(o), (o)->value.b)
#define thvalue(o) check_exp(ttisthread(o), &(o)->value.gc->th)
#define upvalue(o) check_exp(ttisupval(o), &(o)->value.gc->uv)
#define l_isfalse(o) (ttisnil(o) || (ttisboolean(o) && bvalue(o) == 0))
/*
** for internal debug only
*/
#define checkconsistency(obj) LUAU_ASSERT(!iscollectable(obj) || (ttype(obj) == (obj)->value.gc->gch.tt))
#define checkliveness(g, obj) LUAU_ASSERT(!iscollectable(obj) || ((ttype(obj) == (obj)->value.gc->gch.tt) && !isdead(g, (obj)->value.gc)))
// Macros to set values
#define setnilvalue(obj) ((obj)->tt = LUA_TNIL)
#define setnvalue(obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.n = (x); \
i_o->tt = LUA_TNUMBER; \
}
#if LUA_VECTOR_SIZE == 4
#define setvvalue(obj, x, y, z, w) \
{ \
TValue* i_o = (obj); \
float* i_v = i_o->value.v; \
i_v[0] = (x); \
i_v[1] = (y); \
i_v[2] = (z); \
i_v[3] = (w); \
i_o->tt = LUA_TVECTOR; \
}
#else
#define setvvalue(obj, x, y, z, w) \
{ \
TValue* i_o = (obj); \
float* i_v = i_o->value.v; \
i_v[0] = (x); \
i_v[1] = (y); \
i_v[2] = (z); \
i_o->tt = LUA_TVECTOR; \
}
#endif
#define setpvalue(obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.p = (x); \
i_o->tt = LUA_TLIGHTUSERDATA; \
}
#define setbvalue(obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.b = (x); \
i_o->tt = LUA_TBOOLEAN; \
}
#define setsvalue(L, obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.gc = cast_to(GCObject*, (x)); \
i_o->tt = LUA_TSTRING; \
checkliveness(L->global, i_o); \
}
#define setuvalue(L, obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.gc = cast_to(GCObject*, (x)); \
i_o->tt = LUA_TUSERDATA; \
checkliveness(L->global, i_o); \
}
#define setthvalue(L, obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.gc = cast_to(GCObject*, (x)); \
i_o->tt = LUA_TTHREAD; \
checkliveness(L->global, i_o); \
}
#define setclvalue(L, obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.gc = cast_to(GCObject*, (x)); \
i_o->tt = LUA_TFUNCTION; \
checkliveness(L->global, i_o); \
}
#define sethvalue(L, obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.gc = cast_to(GCObject*, (x)); \
i_o->tt = LUA_TTABLE; \
checkliveness(L->global, i_o); \
}
#define setptvalue(L, obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.gc = cast_to(GCObject*, (x)); \
i_o->tt = LUA_TPROTO; \
checkliveness(L->global, i_o); \
}
#define setupvalue(L, obj, x) \
{ \
TValue* i_o = (obj); \
i_o->value.gc = cast_to(GCObject*, (x)); \
i_o->tt = LUA_TUPVAL; \
checkliveness(L->global, i_o); \
}
#define setobj(L, obj1, obj2) \
{ \
const TValue* o2 = (obj2); \
TValue* o1 = (obj1); \
*o1 = *o2; \
checkliveness(L->global, o1); \
}
/*
** different types of sets, according to destination
*/
// to stack
#define setobj2s setobj
// from table to same table (no barrier)
#define setobjt2t setobj
// to table (needs barrier)
#define setobj2t setobj
// to new object (no barrier)
#define setobj2n setobj
#define setttype(obj, tt) (ttype(obj) = (tt))
#define iscollectable(o) (ttype(o) >= LUA_TSTRING)
typedef TValue* StkId; // index to stack elements
/*
** String headers for string table
*/
typedef struct TString
{
CommonHeader;
// 1 byte padding
int16_t atom;
// 2 byte padding
TString* next; // next string in the hash table bucket
unsigned int hash;
unsigned int len;
char data[1]; // string data is allocated right after the header
} TString;
#define getstr(ts) (ts)->data
#define svalue(o) getstr(tsvalue(o))
typedef struct Udata
{
CommonHeader;
uint8_t tag;
int len;
struct Table* metatable;
union
{
char data[1]; // userdata is allocated right after the header
L_Umaxalign dummy; // ensures maximum alignment for data
};
} Udata;
/*
** Function Prototypes
*/
// clang-format off
typedef struct Proto
{
CommonHeader;
TValue* k; // constants used by the function
Instruction* code; // function bytecode
struct Proto** p; // functions defined inside the function
uint8_t* lineinfo; // for each instruction, line number as a delta from baseline
int* abslineinfo; // baseline line info, one entry for each 1<<linegaplog2 instructions; allocated after lineinfo
struct LocVar* locvars; // information about local variables
TString** upvalues; // upvalue names
TString* source;
TString* debugname;
uint8_t* debuginsn; // a copy of code[] array with just opcodes
const Instruction* codeentry;
void* execdata;
uintptr_t exectarget;
GCObject* gclist;
int sizecode;
int sizep;
int sizelocvars;
int sizeupvalues;
int sizek;
int sizelineinfo;
int linegaplog2;
int linedefined;
int bytecodeid;
uint8_t nups; // number of upvalues
uint8_t numparams;
uint8_t is_vararg;
uint8_t maxstacksize;
} Proto;
// clang-format on
typedef struct LocVar
{
TString* varname;
int startpc; // first point where variable is active
int endpc; // first point where variable is dead
uint8_t reg; // register slot, relative to base, where variable is stored
} LocVar;
/*
** Upvalues
*/
typedef struct UpVal
{
CommonHeader;
uint8_t markedopen; // set if reachable from an alive thread (only valid during atomic)
// 4 byte padding (x64)
TValue* v; // points to stack or to its own value
union
{
TValue value; // the value (when closed)
struct
{
// global double linked list (when open)
struct UpVal* prev;
struct UpVal* next;
// thread linked list (when open)
struct UpVal* threadnext;
} open;
} u;
} UpVal;
#define upisopen(up) ((up)->v != &(up)->u.value)
/*
** Closures
*/
typedef struct Closure
{
CommonHeader;
uint8_t isC;
uint8_t nupvalues;
uint8_t stacksize;
uint8_t preload;
GCObject* gclist;
struct Table* env;
union
{
struct
{
lua_CFunction f;
lua_Continuation cont;
const char* debugname;
TValue upvals[1];
} c;
struct
{
struct Proto* p;
TValue uprefs[1];
} l;
};
} Closure;
#define iscfunction(o) (ttype(o) == LUA_TFUNCTION && clvalue(o)->isC)
#define isLfunction(o) (ttype(o) == LUA_TFUNCTION && !clvalue(o)->isC)
/*
** Tables
*/
typedef struct TKey
{
::Value value;
int extra[LUA_EXTRA_SIZE];
unsigned tt : 4;
int next : 28; // for chaining
} TKey;
typedef struct LuaNode
{
TValue val;
TKey key;
} LuaNode;
// copy a value into a key
#define setnodekey(L, node, obj) \
{ \
LuaNode* n_ = (node); \
const TValue* i_o = (obj); \
n_->key.value = i_o->value; \
memcpy(n_->key.extra, i_o->extra, sizeof(n_->key.extra)); \
n_->key.tt = i_o->tt; \
checkliveness(L->global, i_o); \
}
// copy a value from a key
#define getnodekey(L, obj, node) \
{ \
TValue* i_o = (obj); \
const LuaNode* n_ = (node); \
i_o->value = n_->key.value; \
memcpy(i_o->extra, n_->key.extra, sizeof(i_o->extra)); \
i_o->tt = n_->key.tt; \
checkliveness(L->global, i_o); \
}
// clang-format off
typedef struct Table
{
CommonHeader;
uint8_t tmcache; // 1<<p means tagmethod(p) is not present
uint8_t readonly; // sandboxing feature to prohibit writes to table
uint8_t safeenv; // environment doesn't share globals with other scripts
uint8_t lsizenode; // log2 of size of `node' array
uint8_t nodemask8; // (1<<lsizenode)-1, truncated to 8 bits
int sizearray; // size of `array' array
union
{
int lastfree; // any free position is before this position
int aboundary; // negated 'boundary' of `array' array, which is only valid if aboundary < 0
};
struct Table* metatable;
TValue* array; // array part
LuaNode* node;
GCObject* gclist;
} Table;
// clang-format on
/*
** `modulo' operation for hashing (size is always a power of 2)
*/
#define lmod(s, size) (check_exp((size & (size - 1)) == 0, (cast_to(int, (s) & ((size)-1)))))
#define twoto(x) ((int)(1 << (x)))
#define sizenode(t) (twoto((t)->lsizenode))
#define luaO_nilobject (&luaO_nilobject_)
LUAI_DATA const TValue luaO_nilobject_;
#define ceillog2(x) (luaO_log2((x)-1) + 1)
LUAI_FUNC int luaO_log2(unsigned int x);
LUAI_FUNC int luaO_rawequalObj(const TValue* t1, const TValue* t2);
LUAI_FUNC int luaO_rawequalKey(const TKey* t1, const TValue* t2);
LUAI_FUNC int luaO_str2d(const char* s, double* result);
LUAI_FUNC const char* luaO_pushvfstring(lua_State* L, const char* fmt, va_list argp);
LUAI_FUNC const char* luaO_pushfstring(lua_State* L, const char* fmt, ...);
LUAI_FUNC const char* luaO_chunkid(char* buf, size_t buflen, const char* source, size_t srclen);
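A sketch of how the setter and accessor macros above pair up; the free-standing local TValue is hypothetical (real values normally live on a stack owned by a lua_State):
TValue v;
setnvalue(&v, 42.0); // tags the slot LUA_TNUMBER and stores the double payload
LUAU_ASSERT(ttisnumber(&v));
double d = nvalue(&v); // check_exp asserts the tag before reading the payload
setnilvalue(&v);       // only the tag changes; the old payload bits are left in place
LUAU_ASSERT(!iscollectable(&v)); // nil, boolean, number and vector are not GC-managed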

View File

@ -1,208 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include <string.h>
#include <time.h>
#define LUA_STRFTIMEOPTIONS "aAbBcdHIjmMpSUwWxXyYzZ%"
#if defined(_WIN32)
static tm* gmtime_r(const time_t* timep, tm* result)
{
return gmtime_s(result, timep) == 0 ? result : NULL;
}
static tm* localtime_r(const time_t* timep, tm* result)
{
return localtime_s(result, timep) == 0 ? result : NULL;
}
static time_t timegm(struct tm* timep)
{
return _mkgmtime(timep);
}
#elif defined(__FreeBSD__)
static tm* gmtime_r(const time_t* timep, tm* result)
{
return gmtime_s(timep, result) == 0 ? result : NULL;
}
static tm* localtime_r(const time_t* timep, tm* result)
{
return localtime_s(timep, result) == 0 ? result : NULL;
}
static time_t timegm(struct tm* timep)
{
return mktime(timep);
}
#endif
static int os_clock(lua_State* L)
{
lua_pushnumber(L, lua_clock());
return 1;
}
/*
** {======================================================
** Time/Date operations
** { year=%Y, month=%m, day=%d, hour=%H, min=%M, sec=%S,
** wday=%w+1, yday=%j, isdst=? }
** =======================================================
*/
static void setfield(lua_State* L, const char* key, int value)
{
lua_pushinteger(L, value);
lua_setfield(L, -2, key);
}
static void setboolfield(lua_State* L, const char* key, int value)
{
if (value < 0) // undefined?
return; // does not set field
lua_pushboolean(L, value);
lua_setfield(L, -2, key);
}
static int getboolfield(lua_State* L, const char* key)
{
int res;
lua_rawgetfield(L, -1, key);
res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1);
lua_pop(L, 1);
return res;
}
static int getfield(lua_State* L, const char* key, int d)
{
int res;
lua_rawgetfield(L, -1, key);
if (lua_isnumber(L, -1))
res = (int)lua_tointeger(L, -1);
else
{
if (d < 0)
luaL_error(L, "field '%s' missing in date table", key);
res = d;
}
lua_pop(L, 1);
return res;
}
static int os_date(lua_State* L)
{
const char* s = luaL_optstring(L, 1, "%c");
time_t t = luaL_opt(L, (time_t)luaL_checknumber, 2, time(NULL));
struct tm tm;
struct tm* stm;
if (*s == '!')
{ // UTC?
stm = gmtime_r(&t, &tm);
s++; // skip `!'
}
else
{
// on Windows, localtime() fails with dates before epoch start so we disallow that
stm = t < 0 ? NULL : localtime_r(&t, &tm);
}
if (stm == NULL) // invalid date?
{
lua_pushnil(L);
}
else if (strcmp(s, "*t") == 0)
{
lua_createtable(L, 0, 9); // 9 = number of fields
setfield(L, "sec", stm->tm_sec);
setfield(L, "min", stm->tm_min);
setfield(L, "hour", stm->tm_hour);
setfield(L, "day", stm->tm_mday);
setfield(L, "month", stm->tm_mon + 1);
setfield(L, "year", stm->tm_year + 1900);
setfield(L, "wday", stm->tm_wday + 1);
setfield(L, "yday", stm->tm_yday + 1);
setboolfield(L, "isdst", stm->tm_isdst);
}
else
{
char cc[3];
cc[0] = '%';
cc[2] = '\0';
luaL_Buffer b;
luaL_buffinit(L, &b);
for (; *s; s++)
{
if (*s != '%' || *(s + 1) == '\0') // no conversion specifier?
{
luaL_addchar(&b, *s);
}
else if (strchr(LUA_STRFTIMEOPTIONS, *(s + 1)) == 0)
{
luaL_argerror(L, 1, "invalid conversion specifier");
}
else
{
size_t reslen;
char buff[200]; // should be big enough for any conversion result
cc[1] = *(++s);
reslen = strftime(buff, sizeof(buff), cc, stm);
luaL_addlstring(&b, buff, reslen, -1);
}
}
luaL_pushresult(&b);
}
return 1;
}
static int os_time(lua_State* L)
{
time_t t;
if (lua_isnoneornil(L, 1)) // called without args?
t = time(NULL); // get current time
else
{
struct tm ts;
luaL_checktype(L, 1, LUA_TTABLE);
lua_settop(L, 1); // make sure table is at the top
ts.tm_sec = getfield(L, "sec", 0);
ts.tm_min = getfield(L, "min", 0);
ts.tm_hour = getfield(L, "hour", 12);
ts.tm_mday = getfield(L, "day", -1);
ts.tm_mon = getfield(L, "month", -1) - 1;
ts.tm_year = getfield(L, "year", -1) - 1900;
ts.tm_isdst = getboolfield(L, "isdst");
// Note: upstream Lua uses mktime() here which assumes input is local time, but we prefer UTC for consistency
t = timegm(&ts);
}
if (t == (time_t)(-1))
lua_pushnil(L);
else
lua_pushnumber(L, (double)t);
return 1;
}
static int os_difftime(lua_State* L)
{
lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)), (time_t)(luaL_optnumber(L, 2, 0))));
return 1;
}
static const luaL_Reg syslib[] = {
{"clock", os_clock},
{"date", os_date},
{"difftime", os_difftime},
{"time", os_time},
{NULL, NULL},
};
int luaopen_os(lua_State* L)
{
luaL_register(L, LUA_OSLIBNAME, syslib);
return 1;
}
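Because os_time goes through timegm rather than mktime, the date table is interpreted as UTC; a hypothetical C equivalent of the call os.time{year = 1970, month = 1, day = 1, hour = 0}:
struct tm ts = {};
ts.tm_mday = 1;           // day 1
ts.tm_mon = 1 - 1;        // January (tm months are 0-based)
ts.tm_year = 1970 - 1900;
time_t t = timegm(&ts);   // 0 regardless of the host time zone; mktime would shift by the local UTC offset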

View File

@ -1,63 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lua.h"
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <Windows.h>
#endif
#ifdef __APPLE__
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif
#include <time.h>
static double clock_period()
{
#if defined(_WIN32)
LARGE_INTEGER result = {};
QueryPerformanceFrequency(&result);
return 1.0 / double(result.QuadPart);
#elif defined(__APPLE__)
mach_timebase_info_data_t result = {};
mach_timebase_info(&result);
return double(result.numer) / double(result.denom) * 1e-9;
#elif defined(__linux__)
return 1e-9;
#else
return 1.0 / double(CLOCKS_PER_SEC);
#endif
}
static double clock_timestamp()
{
#if defined(_WIN32)
LARGE_INTEGER result = {};
QueryPerformanceCounter(&result);
return double(result.QuadPart);
#elif defined(__APPLE__)
return double(mach_absolute_time());
#elif defined(__linux__)
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return now.tv_sec * 1e9 + now.tv_nsec;
#else
return double(clock());
#endif
}
double lua_clock()
{
static double period = clock_period();
return clock_timestamp() * period;
}
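lua_clock returns seconds as a double on a monotonic base, so an interval is a plain subtraction; a minimal sketch:
double begin = lua_clock();
// ... work to be measured ...
double elapsedms = (lua_clock() - begin) * 1000.0; // monotonic, high-resolution on the platforms above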

View File

@ -1,244 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lstate.h"
#include "ltable.h"
#include "lstring.h"
#include "lfunc.h"
#include "lmem.h"
#include "lgc.h"
#include "ldo.h"
#include "ldebug.h"
/*
** Main thread combines a thread state and the global state
*/
typedef struct LG
{
lua_State l;
global_State g;
} LG;
static void stack_init(lua_State* L1, lua_State* L)
{
// initialize CallInfo array
L1->base_ci = luaM_newarray(L, BASIC_CI_SIZE, CallInfo, L1->memcat);
L1->ci = L1->base_ci;
L1->size_ci = BASIC_CI_SIZE;
L1->end_ci = L1->base_ci + L1->size_ci - 1;
// initialize stack array
L1->stack = luaM_newarray(L, BASIC_STACK_SIZE + EXTRA_STACK, TValue, L1->memcat);
L1->stacksize = BASIC_STACK_SIZE + EXTRA_STACK;
TValue* stack = L1->stack;
for (int i = 0; i < BASIC_STACK_SIZE + EXTRA_STACK; i++)
setnilvalue(stack + i); // erase new stack
L1->top = stack;
L1->stack_last = stack + (L1->stacksize - EXTRA_STACK);
// initialize first ci
L1->ci->func = L1->top;
setnilvalue(L1->top++); // `function' entry for this `ci'
L1->base = L1->ci->base = L1->top;
L1->ci->top = L1->top + LUA_MINSTACK;
}
static void freestack(lua_State* L, lua_State* L1)
{
luaM_freearray(L, L1->base_ci, L1->size_ci, CallInfo, L1->memcat);
luaM_freearray(L, L1->stack, L1->stacksize, TValue, L1->memcat);
}
/*
** open parts that may cause memory-allocation errors
*/
static void f_luaopen(lua_State* L, void* ud)
{
global_State* g = L->global;
stack_init(L, L); // init stack
L->gt = luaH_new(L, 0, 2); // table of globals
sethvalue(L, registry(L), luaH_new(L, 0, 2)); // registry
luaS_resize(L, LUA_MINSTRTABSIZE); // initial size of string table
luaT_init(L);
luaS_fix(luaS_newliteral(L, LUA_MEMERRMSG)); // pin to make sure we can always throw this error
luaS_fix(luaS_newliteral(L, LUA_ERRERRMSG)); // pin to make sure we can always throw this error
g->GCthreshold = 4 * g->totalbytes;
}
static void preinit_state(lua_State* L, global_State* g)
{
L->global = g;
L->stack = NULL;
L->stacksize = 0;
L->gt = NULL;
L->openupval = NULL;
L->size_ci = 0;
L->nCcalls = L->baseCcalls = 0;
L->status = 0;
L->base_ci = L->ci = NULL;
L->namecall = NULL;
L->cachedslot = 0;
L->singlestep = false;
L->isactive = false;
L->activememcat = 0;
L->userdata = NULL;
}
static void close_state(lua_State* L)
{
global_State* g = L->global;
luaF_close(L, L->stack); // close all upvalues for this thread
luaC_freeall(L); // collect all objects
LUAU_ASSERT(g->strt.nuse == 0);
luaM_freearray(L, L->global->strt.hash, L->global->strt.size, TString*, 0);
freestack(L, L);
for (int i = 0; i < LUA_SIZECLASSES; i++)
{
LUAU_ASSERT(g->freepages[i] == NULL);
LUAU_ASSERT(g->freegcopages[i] == NULL);
}
LUAU_ASSERT(g->allgcopages == NULL);
LUAU_ASSERT(g->totalbytes == sizeof(LG));
LUAU_ASSERT(g->memcatbytes[0] == sizeof(LG));
for (int i = 1; i < LUA_MEMORY_CATEGORIES; i++)
LUAU_ASSERT(g->memcatbytes[i] == 0);
#if LUA_CUSTOM_EXECUTION
if (L->global->ecb.close)
L->global->ecb.close(L);
#endif
(*g->frealloc)(g->ud, L, sizeof(LG), 0);
}
lua_State* luaE_newthread(lua_State* L)
{
lua_State* L1 = luaM_newgco(L, lua_State, sizeof(lua_State), L->activememcat);
luaC_init(L, L1, LUA_TTHREAD);
preinit_state(L1, L->global);
L1->activememcat = L->activememcat; // inherit the active memory category
stack_init(L1, L); // init stack
L1->gt = L->gt; // share table of globals
L1->singlestep = L->singlestep;
LUAU_ASSERT(iswhite(obj2gco(L1)));
return L1;
}
void luaE_freethread(lua_State* L, lua_State* L1, lua_Page* page)
{
global_State* g = L->global;
if (g->cb.userthread)
g->cb.userthread(NULL, L1);
freestack(L, L1);
luaM_freegco(L, L1, sizeof(lua_State), L1->memcat, page);
}
void lua_resetthread(lua_State* L)
{
// close upvalues before clearing anything
luaF_close(L, L->stack);
// clear call frames
CallInfo* ci = L->base_ci;
ci->func = L->stack;
ci->base = ci->func + 1;
ci->top = ci->base + LUA_MINSTACK;
setnilvalue(ci->func);
L->ci = ci;
if (L->size_ci != BASIC_CI_SIZE)
luaD_reallocCI(L, BASIC_CI_SIZE);
// clear thread state
L->status = LUA_OK;
L->base = L->ci->base;
L->top = L->ci->base;
L->nCcalls = L->baseCcalls = 0;
// clear thread stack
if (L->stacksize != BASIC_STACK_SIZE + EXTRA_STACK)
luaD_reallocstack(L, BASIC_STACK_SIZE);
for (int i = 0; i < L->stacksize; i++)
setnilvalue(L->stack + i);
}
int lua_isthreadreset(lua_State* L)
{
return L->ci == L->base_ci && L->base == L->top && L->status == LUA_OK;
}
lua_State* lua_newstate(lua_Alloc f, void* ud)
{
int i;
lua_State* L;
global_State* g;
void* l = (*f)(ud, NULL, 0, sizeof(LG));
if (l == NULL)
return NULL;
L = (lua_State*)l;
g = &((LG*)L)->g;
L->tt = LUA_TTHREAD;
L->marked = g->currentwhite = bit2mask(WHITE0BIT, FIXEDBIT);
L->memcat = 0;
preinit_state(L, g);
g->frealloc = f;
g->ud = ud;
g->mainthread = L;
g->uvhead.u.open.prev = &g->uvhead;
g->uvhead.u.open.next = &g->uvhead;
g->GCthreshold = 0; // mark it as unfinished state
g->registryfree = 0;
g->errorjmp = NULL;
g->rngstate = 0;
g->ptrenckey[0] = 1;
g->ptrenckey[1] = 0;
g->ptrenckey[2] = 0;
g->ptrenckey[3] = 0;
g->strt.size = 0;
g->strt.nuse = 0;
g->strt.hash = NULL;
setnilvalue(&g->pseudotemp);
setnilvalue(registry(L));
g->gcstate = GCSpause;
g->gray = NULL;
g->grayagain = NULL;
g->weak = NULL;
g->totalbytes = sizeof(LG);
g->gcgoal = LUAI_GCGOAL;
g->gcstepmul = LUAI_GCSTEPMUL;
g->gcstepsize = LUAI_GCSTEPSIZE << 10;
for (i = 0; i < LUA_SIZECLASSES; i++)
{
g->freepages[i] = NULL;
g->freegcopages[i] = NULL;
}
g->allgcopages = NULL;
g->sweepgcopage = NULL;
for (i = 0; i < LUA_T_COUNT; i++)
g->mt[i] = NULL;
for (i = 0; i < LUA_UTAG_LIMIT; i++)
g->udatagc[i] = NULL;
for (i = 0; i < LUA_MEMORY_CATEGORIES; i++)
g->memcatbytes[i] = 0;
g->memcatbytes[0] = sizeof(LG);
g->cb = lua_Callbacks();
g->ecb = lua_ExecutionCallbacks();
g->gcstats = GCStats();
#ifdef LUAI_GCMETRICS
g->gcmetrics = GCMetrics();
#endif
if (luaD_rawrunprotected(L, f_luaopen, NULL) != 0)
{
// memory allocation error: free partial state
close_state(L);
L = NULL;
}
return L;
}
void lua_close(lua_State* L)
{
L = L->global->mainthread; // only the main thread can be closed
luaF_close(L, L->stack); // close all upvalues for this thread
close_state(L);
}
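A minimal sketch of driving lua_newstate with a realloc-backed allocator; the frealloc contract used here (nsize == 0 frees, otherwise reallocate from osize to nsize) is the same one close_state relies on to return the LG block itself.
#include <stdlib.h>
static void* l_alloc(void* ud, void* ptr, size_t osize, size_t nsize)
{
    (void)ud;
    (void)osize;
    if (nsize == 0)
    {
        free(ptr); // also covers the final (*g->frealloc)(g->ud, L, sizeof(LG), 0) call
        return NULL;
    }
    return realloc(ptr, nsize);
}
// lua_State* L = lua_newstate(l_alloc, NULL);
// ...
// lua_close(L);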

View File

@ -1,301 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
#include "ltm.h"
// registry
#define registry(L) (&L->global->registry)
// extra stack space to handle TM calls and some other extras
#define EXTRA_STACK 5
#define BASIC_CI_SIZE 8
#define BASIC_STACK_SIZE (2 * LUA_MINSTACK)
// clang-format off
typedef struct stringtable
{
TString** hash;
uint32_t nuse; // number of elements
int size;
} stringtable;
// clang-format on
/*
** information about a call
**
** the general Lua stack frame structure is as follows:
** - each function gets a stack frame, with function "registers" being stack slots on the frame
** - function arguments are associated with registers 0+
** - function locals and temporaries follow after; usually locals are a consecutive block per scope, and
**   temporaries are allocated after this, but this is up to the compiler
**
** when a function doesn't have varargs, the stack layout is as follows:
** ^ (func) ^^ [fixed args] [locals + temporaries]
** where ^ is the 'func' pointer in CallInfo struct, and ^^ is the 'base' pointer (which is what registers are relative to)
**
** when a function *does* have varargs, the stack layout is more complex - the runtime has to copy the fixed
** arguments so that the 0+ addressing still works, as follows:
** ^ (func) [fixed args] [varargs] ^^ [fixed args] [locals + temporaries]
**
** computing the sizes of these individual blocks works as follows:
** - the number of fixed args always matches `numparams` in a function's Proto object; the runtime adds `nil`
**   during call execution as necessary
** - the number of variadic args can be computed by evaluating (ci->base - ci->func - 1 - numparams)
**
** the CallInfo structures are allocated as an array, with each subsequent call being *appended* to this array
** (so if f calls g, CallInfo for g immediately follows CallInfo for f)
** the `nresults` field in CallInfo is set by the caller to tell the function how many results the caller is
** expecting on the stack after the function returns
** the `flags` field in CallInfo contains internal execution flags that are important for pcall/etc, see LUA_CALLINFO_*
*/
// clang-format off
typedef struct CallInfo
{
StkId base; // base for this function
StkId func; // function index in the stack
StkId top; // top for this function
const Instruction* savedpc;
int nresults; // expected number of results from this function
unsigned int flags; // call frame flags, see LUA_CALLINFO_*
} CallInfo;
// clang-format on
#define LUA_CALLINFO_RETURN (1 << 0) // should the interpreter return after returning from this callinfo? first frame must have this set
#define LUA_CALLINFO_HANDLE (1 << 1) // should the error thrown during execution get handled by continuation from this callinfo? func must be C
#define LUA_CALLINFO_NATIVE (1 << 2) // should this function be executed using execution callback for native code
#define curr_func(L) (clvalue(L->ci->func))
#define ci_func(ci) (clvalue((ci)->func))
#define f_isLua(ci) (!ci_func(ci)->isC)
#define isLua(ci) (ttisfunction((ci)->func) && f_isLua(ci))
struct GCStats
{
// data for proportional-integral controller of heap trigger value
int32_t triggerterms[32] = {0};
uint32_t triggertermpos = 0;
int32_t triggerintegral = 0;
size_t atomicstarttotalsizebytes = 0;
size_t endtotalsizebytes = 0;
size_t heapgoalsizebytes = 0;
double starttimestamp = 0;
double atomicstarttimestamp = 0;
double endtimestamp = 0;
};
#ifdef LUAI_GCMETRICS
struct GCCycleMetrics
{
size_t starttotalsizebytes = 0;
size_t heaptriggersizebytes = 0;
double pausetime = 0.0; // time from end of the last cycle to the start of a new one
double starttimestamp = 0.0;
double endtimestamp = 0.0;
double marktime = 0.0;
double markassisttime = 0.0;
double markmaxexplicittime = 0.0;
size_t markexplicitsteps = 0;
size_t markwork = 0;
double atomicstarttimestamp = 0.0;
size_t atomicstarttotalsizebytes = 0;
double atomictime = 0.0;
// specific atomic stage parts
double atomictimeupval = 0.0;
double atomictimeweak = 0.0;
double atomictimegray = 0.0;
double atomictimeclear = 0.0;
double sweeptime = 0.0;
double sweepassisttime = 0.0;
double sweepmaxexplicittime = 0.0;
size_t sweepexplicitsteps = 0;
size_t sweepwork = 0;
size_t assistwork = 0;
size_t explicitwork = 0;
size_t propagatework = 0;
size_t propagateagainwork = 0;
size_t endtotalsizebytes = 0;
};
struct GCMetrics
{
double stepexplicittimeacc = 0.0;
double stepassisttimeacc = 0.0;
// when cycle is completed, last cycle values are updated
uint64_t completedcycles = 0;
GCCycleMetrics lastcycle;
GCCycleMetrics currcycle;
};
#endif
// Callbacks that can be used to redirect code execution from Luau bytecode VM to a custom implementation (AoT/JiT/sandboxing/...)
struct lua_ExecutionCallbacks
{
void* context;
void (*close)(lua_State* L); // called when global VM state is closed
void (*destroy)(lua_State* L, Proto* proto); // called when function is destroyed
int (*enter)(lua_State* L, Proto* proto); // called when function is about to start/resume (when execdata is present), return 0 to exit VM
void (*setbreakpoint)(lua_State* L, Proto* proto, int line); // called when a breakpoint is set in a function
};
/*
** `global state', shared by all threads of this state
*/
// clang-format off
typedef struct global_State
{
stringtable strt; // hash table for strings
lua_Alloc frealloc; // function to reallocate memory
void* ud; // auxiliary data to `frealloc'
uint8_t currentwhite;
uint8_t gcstate; // state of garbage collector
GCObject* gray; // list of gray objects
GCObject* grayagain; // list of objects to be traversed atomically
GCObject* weak; // list of weak tables (to be cleared)
size_t GCthreshold; // when totalbytes > GCthreshold, run GC step
size_t totalbytes; // number of bytes currently allocated
int gcgoal; // see LUAI_GCGOAL
int gcstepmul; // see LUAI_GCSTEPMUL
int gcstepsize; // see LUAI_GCSTEPSIZE
struct lua_Page* freepages[LUA_SIZECLASSES]; // free page linked list for each size class for non-collectable objects
struct lua_Page* freegcopages[LUA_SIZECLASSES]; // free page linked list for each size class for collectable objects
struct lua_Page* allgcopages; // page linked list with all pages for all classes
struct lua_Page* sweepgcopage; // position of the sweep in `allgcopages'
size_t memcatbytes[LUA_MEMORY_CATEGORIES]; // total amount of memory used by each memory category
struct lua_State* mainthread;
UpVal uvhead; // head of double-linked list of all open upvalues
struct Table* mt[LUA_T_COUNT]; // metatables for basic types
TString* ttname[LUA_T_COUNT]; // names for basic types
TString* tmname[TM_N]; // array with tag-method names
TValue pseudotemp; // storage for temporary values used in pseudo2addr
TValue registry; // registry table, used by lua_ref and LUA_REGISTRYINDEX
int registryfree; // next free slot in registry
struct lua_jmpbuf* errorjmp; // jump buffer data for longjmp-style error handling
uint64_t rngstate; // PCG random number generator state
uint64_t ptrenckey[4]; // pointer encoding key for display
lua_Callbacks cb;
lua_ExecutionCallbacks ecb;
void (*udatagc[LUA_UTAG_LIMIT])(lua_State*, void*); // for each userdata tag, a gc callback to be called immediately before freeing memory
GCStats gcstats;
#ifdef LUAI_GCMETRICS
GCMetrics gcmetrics;
#endif
} global_State;
// clang-format on
/*
** `per thread' state
*/
// clang-format off
struct lua_State
{
CommonHeader;
uint8_t status;
uint8_t activememcat; // memory category that is used for new GC object allocations
bool isactive; // thread is currently executing, stack may be mutated without barriers
bool singlestep; // call debugstep hook after each instruction
StkId top; // first free slot in the stack
StkId base; // base of current function
global_State* global;
CallInfo* ci; // call info for current function
StkId stack_last; // last free slot in the stack
StkId stack; // stack base
CallInfo* end_ci; // points after end of ci array
CallInfo* base_ci; // array of CallInfo's
int stacksize;
int size_ci; // size of array `base_ci'
unsigned short nCcalls; // number of nested C calls
unsigned short baseCcalls; // nested C calls when resuming coroutine
int cachedslot; // when table operations or INDEX/NEWINDEX is invoked from Luau, what is the expected slot for lookup?
Table* gt; // table of globals
UpVal* openupval; // list of open upvalues in this stack
GCObject* gclist;
TString* namecall; // when invoked from Luau using NAMECALL, what method do we need to invoke?
void* userdata;
};
// clang-format on
/*
** Union of all collectible objects
*/
union GCObject
{
GCheader gch;
struct TString ts;
struct Udata u;
struct Closure cl;
struct Table h;
struct Proto p;
struct UpVal uv;
struct lua_State th; // thread
};
// macros to convert a GCObject into a specific value
#define gco2ts(o) check_exp((o)->gch.tt == LUA_TSTRING, &((o)->ts))
#define gco2u(o) check_exp((o)->gch.tt == LUA_TUSERDATA, &((o)->u))
#define gco2cl(o) check_exp((o)->gch.tt == LUA_TFUNCTION, &((o)->cl))
#define gco2h(o) check_exp((o)->gch.tt == LUA_TTABLE, &((o)->h))
#define gco2p(o) check_exp((o)->gch.tt == LUA_TPROTO, &((o)->p))
#define gco2uv(o) check_exp((o)->gch.tt == LUA_TUPVAL, &((o)->uv))
#define gco2th(o) check_exp((o)->gch.tt == LUA_TTHREAD, &((o)->th))
// macro to convert any Lua object into a GCObject
#define obj2gco(v) check_exp(iscollectable(v), cast_to(GCObject*, (v) + 0))
LUAI_FUNC lua_State* luaE_newthread(lua_State* L);
LUAI_FUNC void luaE_freethread(lua_State* L, lua_State* L1, struct lua_Page* page);
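A hypothetical helper implementing the vararg-count formula from the layout comment above; it is only meaningful when the callee is a vararg Lua function.
static int getvarargcount(CallInfo* ci)
{
    Closure* cl = ci_func(ci);
    LUAU_ASSERT(!cl->isC && cl->l.p->is_vararg);
    return cast_int(ci->base - ci->func) - 1 - cl->l.p->numparams;
}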

View File

@ -1,193 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lstring.h"
#include "lgc.h"
#include "lmem.h"
#include <string.h>
unsigned int luaS_hash(const char* str, size_t len)
{
// Note that this hashing algorithm is replicated in BytecodeBuilder.cpp, BytecodeBuilder::getStringHash
unsigned int a = 0, b = 0;
unsigned int h = unsigned(len);
// hash prefix in 12-byte chunks (using unaligned reads) with ARX based hash (LuaJIT v2.1, lookup3)
// note that we stop at length<32 to maintain compatibility with Lua 5.1
while (len >= 32)
{
#define rol(x, s) ((x >> s) | (x << (32 - s)))
#define mix(u, v, w) a ^= h, a -= rol(h, u), b ^= a, b -= rol(a, v), h ^= b, h -= rol(b, w)
// should compile into fast unaligned reads
uint32_t block[3];
memcpy(block, str, 12);
a += block[0];
b += block[1];
h += block[2];
mix(14, 11, 25);
str += 12;
len -= 12;
#undef mix
#undef rol
}
// original Lua 5.1 hash for compatibility (exact match when len<32)
for (size_t i = len; i > 0; --i)
h ^= (h << 5) + (h >> 2) + (uint8_t)str[i - 1];
return h;
}
void luaS_resize(lua_State* L, int newsize)
{
TString** newhash = luaM_newarray(L, newsize, TString*, 0);
stringtable* tb = &L->global->strt;
for (int i = 0; i < newsize; i++)
newhash[i] = NULL;
// rehash
for (int i = 0; i < tb->size; i++)
{
TString* p = tb->hash[i];
while (p)
{ // for each node in the list
TString* next = p->next; // save next
unsigned int h = p->hash;
int h1 = lmod(h, newsize); // new position
LUAU_ASSERT(cast_int(h % newsize) == lmod(h, newsize));
p->next = newhash[h1]; // chain it
newhash[h1] = p;
p = next;
}
}
luaM_freearray(L, tb->hash, tb->size, TString*, 0);
tb->size = newsize;
tb->hash = newhash;
}
static TString* newlstr(lua_State* L, const char* str, size_t l, unsigned int h)
{
if (l > MAXSSIZE)
luaM_toobig(L);
TString* ts = luaM_newgco(L, TString, sizestring(l), L->activememcat);
luaC_init(L, ts, LUA_TSTRING);
ts->atom = ATOM_UNDEF;
ts->hash = h;
ts->len = unsigned(l);
memcpy(ts->data, str, l);
ts->data[l] = '\0'; // ending 0
stringtable* tb = &L->global->strt;
h = lmod(h, tb->size);
ts->next = tb->hash[h]; // chain new entry
tb->hash[h] = ts;
tb->nuse++;
if (tb->nuse > cast_to(uint32_t, tb->size) && tb->size <= INT_MAX / 2)
luaS_resize(L, tb->size * 2); // too crowded
return ts;
}
TString* luaS_bufstart(lua_State* L, size_t size)
{
if (size > MAXSSIZE)
luaM_toobig(L);
TString* ts = luaM_newgco(L, TString, sizestring(size), L->activememcat);
luaC_init(L, ts, LUA_TSTRING);
ts->atom = ATOM_UNDEF;
ts->hash = 0; // computed in luaS_buffinish
ts->len = unsigned(size);
ts->next = NULL;
return ts;
}
TString* luaS_buffinish(lua_State* L, TString* ts)
{
unsigned int h = luaS_hash(ts->data, ts->len);
stringtable* tb = &L->global->strt;
int bucket = lmod(h, tb->size);
// search if we already have this string in the hash table
for (TString* el = tb->hash[bucket]; el != NULL; el = el->next)
{
if (el->len == ts->len && memcmp(el->data, ts->data, ts->len) == 0)
{
// string may be dead
if (isdead(L->global, obj2gco(el)))
changewhite(obj2gco(el));
return el;
}
}
LUAU_ASSERT(ts->next == NULL);
ts->hash = h;
ts->data[ts->len] = '\0'; // ending 0
ts->atom = ATOM_UNDEF;
ts->next = tb->hash[bucket]; // chain new entry
tb->hash[bucket] = ts;
tb->nuse++;
if (tb->nuse > cast_to(uint32_t, tb->size) && tb->size <= INT_MAX / 2)
luaS_resize(L, tb->size * 2); // too crowded
return ts;
}
TString* luaS_newlstr(lua_State* L, const char* str, size_t l)
{
unsigned int h = luaS_hash(str, l);
for (TString* el = L->global->strt.hash[lmod(h, L->global->strt.size)]; el != NULL; el = el->next)
{
if (el->len == l && (memcmp(str, getstr(el), l) == 0))
{
// string may be dead
if (isdead(L->global, obj2gco(el)))
changewhite(obj2gco(el));
return el;
}
}
return newlstr(L, str, l, h); // not found
}
static bool unlinkstr(lua_State* L, TString* ts)
{
global_State* g = L->global;
TString** p = &g->strt.hash[lmod(ts->hash, g->strt.size)];
while (TString* curr = *p)
{
if (curr == ts)
{
*p = curr->next;
return true;
}
else
{
p = &curr->next;
}
}
return false;
}
void luaS_free(lua_State* L, TString* ts, lua_Page* page)
{
if (unlinkstr(L, ts))
L->global->strt.nuse--;
else
LUAU_ASSERT(ts->next == NULL); // orphaned string buffer
luaM_freegco(L, ts, sizestring(ts->len), ts->memcat, page);
}
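A sketch of the interning guarantee that luaS_newlstr provides: equal byte sequences resolve to the same TString, so the VM can compare strings by pointer identity. (Internal API; an embedder would go through lua_pushlstring instead.)
TString* a = luaS_newlstr(L, "hello", 5);
TString* b = luaS_newlstr(L, "hello", 5);
LUAU_ASSERT(a == b); // the second call finds the first string in its hash chain
LUAU_ASSERT(a->hash == luaS_hash("hello", 5));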

View File

@ -1,29 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
#include "lstate.h"
// string size limit
#define MAXSSIZE (1 << 30)
// string atoms are not defined by default; the storage is 16-bit integer
#define ATOM_UNDEF -32768
#define sizestring(len) (offsetof(TString, data) + len + 1)
#define luaS_new(L, s) (luaS_newlstr(L, s, strlen(s)))
#define luaS_newliteral(L, s) (luaS_newlstr(L, "" s, (sizeof(s) / sizeof(char)) - 1))
#define luaS_fix(s) l_setbit((s)->marked, FIXEDBIT)
LUAI_FUNC unsigned int luaS_hash(const char* str, size_t len);
LUAI_FUNC void luaS_resize(lua_State* L, int newsize);
LUAI_FUNC TString* luaS_newlstr(lua_State* L, const char* str, size_t l);
LUAI_FUNC void luaS_free(lua_State* L, TString* ts, struct lua_Page* page);
LUAI_FUNC TString* luaS_bufstart(lua_State* L, size_t size);
LUAI_FUNC TString* luaS_buffinish(lua_State* L, TString* ts);

File diff suppressed because it is too large Load Diff

View File

@ -1,859 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
/*
* Implementation of tables (aka arrays, objects, or hash tables).
*
* Tables keep the elements in two parts: an array part and a hash part.
* Integer keys >=1 are all candidates to be kept in the array part. The actual size of the array is the
* largest n such that at least half the slots between 0 and n are in use.
* Hash uses a mix of chained scatter table with Brent's variation.
*
* A main invariant of these tables is that, if an element is not in its main position (i.e. the original
* position that its hash gives to it), then the colliding element is in its own main position.
* Hence even when the load factor reaches 100%, performance remains good.
*
* Table keys can be arbitrary values unless they contain NaN. Keys are hashed and compared using raw equality,
* so even if the key is a userdata with an overridden __eq, it's not used during hash lookups.
*
* Each table has a "boundary", defined as the index k where t[k] ~= nil and t[k+1] == nil. The boundary can be
* computed using a binary search and can be adjusted when the table is modified; crucially, Luau enforces an
* invariant where the boundary must be in the array part - this enforces a consistent iteration order through the
 * prefix of the table when using pairs(), and makes it possible to implement algorithms that access elements
 * in the 1..#t range more efficiently.
*/
#include "ltable.h"
#include "lstate.h"
#include "ldebug.h"
#include "lgc.h"
#include "lmem.h"
#include "lnumutils.h"
#include <string.h>
// max size of both array and hash part is 2^MAXBITS
#define MAXBITS 26
#define MAXSIZE (1 << MAXBITS)
static_assert(offsetof(LuaNode, val) == 0, "Unexpected Node memory layout, pointer cast in gval2slot is incorrect");
// TKey is bitpacked for memory efficiency so we need to validate bit counts for worst case
static_assert(TKey{{NULL}, {0}, LUA_TDEADKEY, 0}.tt == LUA_TDEADKEY, "not enough bits for tt");
static_assert(TKey{{NULL}, {0}, LUA_TNIL, MAXSIZE - 1}.next == MAXSIZE - 1, "not enough bits for next");
static_assert(TKey{{NULL}, {0}, LUA_TNIL, -(MAXSIZE - 1)}.next == -(MAXSIZE - 1), "not enough bits for next");
// empty hash data points to dummynode so that we can always dereference it
const LuaNode luaH_dummynode = {
{{NULL}, {0}, LUA_TNIL}, // value
{{NULL}, {0}, LUA_TNIL, 0} // key
};
#define dummynode (&luaH_dummynode)
// hash is always reduced mod 2^k
#define hashpow2(t, n) (gnode(t, lmod((n), sizenode(t))))
#define hashstr(t, str) hashpow2(t, (str)->hash)
#define hashboolean(t, p) hashpow2(t, p)
static LuaNode* hashpointer(const Table* t, const void* p)
{
// we discard the high 32-bit portion of the pointer on 64-bit platforms as it doesn't carry much entropy anyway
unsigned int h = unsigned(uintptr_t(p));
// MurmurHash3 32-bit finalizer
h ^= h >> 16;
h *= 0x85ebca6bu;
h ^= h >> 13;
h *= 0xc2b2ae35u;
h ^= h >> 16;
return hashpow2(t, h);
}
static LuaNode* hashnum(const Table* t, double n)
{
static_assert(sizeof(double) == sizeof(unsigned int) * 2, "expected an 8-byte double");
unsigned int i[2];
memcpy(i, &n, sizeof(i));
// mask out sign bit to make sure -0 and 0 hash to the same value
uint32_t h1 = i[0];
uint32_t h2 = i[1] & 0x7fffffff;
// finalizer from MurmurHash64B
const uint32_t m = 0x5bd1e995;
h1 ^= h2 >> 18;
h1 *= m;
h2 ^= h1 >> 22;
h2 *= m;
h1 ^= h2 >> 17;
h1 *= m;
h2 ^= h1 >> 19;
h2 *= m;
// ... truncated to 32-bit output (normally hash is equal to (uint64_t(h1) << 32) | h2, but we only really need the lower 32-bit half)
return hashpow2(t, h2);
}
static LuaNode* hashvec(const Table* t, const float* v)
{
unsigned int i[LUA_VECTOR_SIZE];
memcpy(i, v, sizeof(i));
// convert -0 to 0 to make sure they hash to the same value
i[0] = (i[0] == 0x80000000) ? 0 : i[0];
i[1] = (i[1] == 0x80000000) ? 0 : i[1];
i[2] = (i[2] == 0x80000000) ? 0 : i[2];
// scramble bits to make sure that integer coordinates have entropy in lower bits
i[0] ^= i[0] >> 17;
i[1] ^= i[1] >> 17;
i[2] ^= i[2] >> 17;
// Optimized Spatial Hashing for Collision Detection of Deformable Objects
unsigned int h = (i[0] * 73856093) ^ (i[1] * 19349663) ^ (i[2] * 83492791);
#if LUA_VECTOR_SIZE == 4
i[3] = (i[3] == 0x80000000) ? 0 : i[3];
i[3] ^= i[3] >> 17;
h ^= i[3] * 39916801;
#endif
return hashpow2(t, h);
}
/*
** returns the `main' position of an element in a table (that is, the index
** of its hash value)
*/
static LuaNode* mainposition(const Table* t, const TValue* key)
{
switch (ttype(key))
{
case LUA_TNUMBER:
return hashnum(t, nvalue(key));
case LUA_TVECTOR:
return hashvec(t, vvalue(key));
case LUA_TSTRING:
return hashstr(t, tsvalue(key));
case LUA_TBOOLEAN:
return hashboolean(t, bvalue(key));
case LUA_TLIGHTUSERDATA:
return hashpointer(t, pvalue(key));
default:
return hashpointer(t, gcvalue(key));
}
}
/*
** returns the index for `key' if `key' is an appropriate key to live in
** the array part of the table, -1 otherwise.
*/
static int arrayindex(double key)
{
int i;
luai_num2int(i, key);
return luai_numeq(cast_num(i), key) ? i : -1;
}
/*
** returns the index of a `key' for table traversals. All elements in
** the array part come first, then the elements in the hash part. The
** beginning of a traversal is signalled by -1.
*/
static int findindex(lua_State* L, Table* t, StkId key)
{
int i;
if (ttisnil(key))
return -1; // first iteration
i = ttisnumber(key) ? arrayindex(nvalue(key)) : -1;
if (0 < i && i <= t->sizearray) // is `key' inside array part?
return i - 1; // yes; that's the index (corrected to C)
else
{
LuaNode* n = mainposition(t, key);
for (;;)
{ // check whether `key' is somewhere in the chain
// key may be dead already, but it is ok to use it in `next'
if (luaO_rawequalKey(gkey(n), key) || (ttype(gkey(n)) == LUA_TDEADKEY && iscollectable(key) && gcvalue(gkey(n)) == gcvalue(key)))
{
i = cast_int(n - gnode(t, 0)); // key index in hash table
// hash elements are numbered after array ones
return i + t->sizearray;
}
if (gnext(n) == 0)
break;
n += gnext(n);
}
luaG_runerror(L, "invalid key to 'next'"); // key not found
}
}
int luaH_next(lua_State* L, Table* t, StkId key)
{
int i = findindex(L, t, key); // find original element
for (i++; i < t->sizearray; i++)
{ // try first array part
if (!ttisnil(&t->array[i]))
{ // a non-nil value?
setnvalue(key, cast_num(i + 1));
setobj2s(L, key + 1, &t->array[i]);
return 1;
}
}
for (i -= t->sizearray; i < sizenode(t); i++)
{ // then hash part
if (!ttisnil(gval(gnode(t, i))))
{ // a non-nil value?
getnodekey(L, key, gnode(t, i));
setobj2s(L, key + 1, gval(gnode(t, i)));
return 1;
}
}
return 0; // no more elements
}
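// Iteration illustration (an added note, not in the original source): for a table built as
// {1, 2, x = 3}, luaH_next first yields the array entries t[1] and t[2] (internal indices
// 0..sizearray-1), then walks the hash nodes and yields the pair ("x", 3). The previous key is
// re-located via findindex on every call, so iteration needs no cursor beyond the key itself.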
/*
** {=============================================================
** Rehash
** ==============================================================
*/
#define maybesetaboundary(t, boundary) \
{ \
if (t->aboundary <= 0) \
t->aboundary = -int(boundary); \
}
#define getaboundary(t) (t->aboundary < 0 ? -t->aboundary : t->sizearray)
static int computesizes(int nums[], int* narray)
{
int i;
int twotoi; // 2^i
int a = 0; // number of elements smaller than 2^i
int na = 0; // number of elements to go to array part
int n = 0; // optimal size for array part
for (i = 0, twotoi = 1; twotoi / 2 < *narray; i++, twotoi *= 2)
{
if (nums[i] > 0)
{
a += nums[i];
if (a > twotoi / 2)
{ // more than half elements present?
n = twotoi; // optimal size (till now)
na = a; // all elements smaller than n will go to array part
}
}
if (a == *narray)
break; // all elements already counted
}
*narray = n;
LUAU_ASSERT(*narray / 2 <= na && na <= *narray);
return na;
}
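// Worked example (an added illustration): suppose the integer keys are 1, 2, 3 and 100, so
// nums[0] = nums[1] = nums[2] = 1 (slices (0,1], (1,2], (2,4]) and nums[7] = 1 (key 100 lies in
// (64,128]), with *narray = 4 on entry. The loop sees a = 1 > 0 at twotoi = 1, a = 2 > 1 at
// twotoi = 2 and a = 3 > 2 at twotoi = 4, then stops because twotoi / 2 reaches *narray; the
// result is *narray = 4 with na = 3, so keys 1..3 stay in the array part and key 100 is hashed.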
static int countint(double key, int* nums)
{
int k = arrayindex(key);
if (0 < k && k <= MAXSIZE)
{ // is `key' an appropriate array index?
nums[ceillog2(k)]++; // count as such
return 1;
}
else
return 0;
}
static int numusearray(const Table* t, int* nums)
{
int lg;
int ttlg; // 2^lg
int ause = 0; // summation of `nums'
int i = 1; // count to traverse all array keys
for (lg = 0, ttlg = 1; lg <= MAXBITS; lg++, ttlg *= 2)
{ // for each slice
int lc = 0; // counter
int lim = ttlg;
if (lim > t->sizearray)
{
lim = t->sizearray; // adjust upper limit
if (i > lim)
break; // no more elements to count
}
// count elements in range (2^(lg-1), 2^lg]
for (; i <= lim; i++)
{
if (!ttisnil(&t->array[i - 1]))
lc++;
}
nums[lg] += lc;
ause += lc;
}
return ause;
}
static int numusehash(const Table* t, int* nums, int* pnasize)
{
int totaluse = 0; // total number of elements
int ause = 0; // summation of `nums'
int i = sizenode(t);
while (i--)
{
LuaNode* n = &t->node[i];
if (!ttisnil(gval(n)))
{
if (ttisnumber(gkey(n)))
ause += countint(nvalue(gkey(n)), nums);
totaluse++;
}
}
*pnasize += ause;
return totaluse;
}
static void setarrayvector(lua_State* L, Table* t, int size)
{
if (size > MAXSIZE)
luaG_runerror(L, "table overflow");
luaM_reallocarray(L, t->array, t->sizearray, size, TValue, t->memcat);
TValue* array = t->array;
for (int i = t->sizearray; i < size; i++)
setnilvalue(&array[i]);
t->sizearray = size;
}
static void setnodevector(lua_State* L, Table* t, int size)
{
int lsize;
if (size == 0)
{ // no elements to hash part?
t->node = cast_to(LuaNode*, dummynode); // use common `dummynode'
lsize = 0;
}
else
{
int i;
lsize = ceillog2(size);
if (lsize > MAXBITS)
luaG_runerror(L, "table overflow");
size = twoto(lsize);
t->node = luaM_newarray(L, size, LuaNode, t->memcat);
for (i = 0; i < size; i++)
{
LuaNode* n = gnode(t, i);
gnext(n) = 0;
setnilvalue(gkey(n));
setnilvalue(gval(n));
}
}
t->lsizenode = cast_byte(lsize);
t->nodemask8 = cast_byte((1 << lsize) - 1);
t->lastfree = size; // all positions are free
}
static TValue* newkey(lua_State* L, Table* t, const TValue* key);
static TValue* arrayornewkey(lua_State* L, Table* t, const TValue* key)
{
if (ttisnumber(key))
{
int k;
double n = nvalue(key);
luai_num2int(k, n);
if (luai_numeq(cast_num(k), n) && cast_to(unsigned int, k - 1) < cast_to(unsigned int, t->sizearray))
return &t->array[k - 1];
}
return newkey(L, t, key);
}
static void resize(lua_State* L, Table* t, int nasize, int nhsize)
{
if (nasize > MAXSIZE || nhsize > MAXSIZE)
luaG_runerror(L, "table overflow");
int oldasize = t->sizearray;
int oldhsize = t->lsizenode;
LuaNode* nold = t->node; // save old hash ...
if (nasize > oldasize) // array part must grow?
setarrayvector(L, t, nasize);
// create new hash part with appropriate size
setnodevector(L, t, nhsize);
// used for the migration check at the end
LuaNode* nnew = t->node;
if (nasize < oldasize)
{ // array part must shrink?
t->sizearray = nasize;
// re-insert elements from vanishing slice
for (int i = nasize; i < oldasize; i++)
{
if (!ttisnil(&t->array[i]))
{
TValue ok;
setnvalue(&ok, cast_num(i + 1));
setobjt2t(L, newkey(L, t, &ok), &t->array[i]);
}
}
// shrink array
luaM_reallocarray(L, t->array, oldasize, nasize, TValue, t->memcat);
}
// used for the migration check at the end
TValue* anew = t->array;
// re-insert elements from hash part
for (int i = twoto(oldhsize) - 1; i >= 0; i--)
{
LuaNode* old = nold + i;
if (!ttisnil(gval(old)))
{
TValue ok;
getnodekey(L, &ok, old);
setobjt2t(L, arrayornewkey(L, t, &ok), gval(old));
}
}
// make sure we haven't recursively rehashed during element migration
LUAU_ASSERT(nnew == t->node);
LUAU_ASSERT(anew == t->array);
if (nold != dummynode)
luaM_freearray(L, nold, twoto(oldhsize), LuaNode, t->memcat); // free old array
}
static int adjustasize(Table* t, int size, const TValue* ek)
{
bool tbound = t->node != dummynode || size < t->sizearray;
int ekindex = ek && ttisnumber(ek) ? arrayindex(nvalue(ek)) : -1;
// move the array size up until the boundary is guaranteed to be inside the array part
while (size + 1 == ekindex || (tbound && !ttisnil(luaH_getnum(t, size + 1))))
size++;
return size;
}
void luaH_resizearray(lua_State* L, Table* t, int nasize)
{
int nsize = (t->node == dummynode) ? 0 : sizenode(t);
int asize = adjustasize(t, nasize, NULL);
resize(L, t, asize, nsize);
}
void luaH_resizehash(lua_State* L, Table* t, int nhsize)
{
resize(L, t, t->sizearray, nhsize);
}
static void rehash(lua_State* L, Table* t, const TValue* ek)
{
int nums[MAXBITS + 1]; // nums[i] = number of keys between 2^(i-1) and 2^i
for (int i = 0; i <= MAXBITS; i++)
nums[i] = 0; // reset counts
int nasize = numusearray(t, nums); // count keys in array part
int totaluse = nasize; // all those keys are integer keys
totaluse += numusehash(t, nums, &nasize); // count keys in hash part
// count extra key
if (ttisnumber(ek))
nasize += countint(nvalue(ek), nums);
totaluse++;
// compute new size for array part
int na = computesizes(nums, &nasize);
int nh = totaluse - na;
// enforce the boundary invariant; for performance, only do hash lookups if we must
int nadjusted = adjustasize(t, nasize, ek);
// count how many extra elements belong to array part instead of hash part
int aextra = nadjusted - nasize;
if (aextra != 0)
{
// we no longer need to store those extra array elements in hash part
nh -= aextra;
// because hash nodes are twice as large as array nodes, the memory we saved on the hash part can be used by the array part
// this follows the general sparse-array optimization where the array part is grown once 50% occupancy is reached
nasize = nadjusted + aextra;
// since the size was changed, it's again important to enforce the boundary invariant at the new size
nasize = adjustasize(t, nasize, ek);
}
// resize the table to new computed sizes
resize(L, t, nasize, nh);
}
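// Accounting sketch (an added illustration): if computesizes picks nasize = 4 but the boundary
// invariant forces nadjusted = 6, then aextra = 2 keys migrate from the hash part to the array
// part; since a hash node is roughly twice the size of an array slot, the freed hash budget pays
// for two more array slots, giving nasize = nadjusted + aextra = 8 before the final
// adjustasize pass re-checks the invariant at the new size.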
/*
** }=============================================================
*/
Table* luaH_new(lua_State* L, int narray, int nhash)
{
Table* t = luaM_newgco(L, Table, sizeof(Table), L->activememcat);
luaC_init(L, t, LUA_TTABLE);
t->metatable = NULL;
t->tmcache = cast_byte(~0);
t->array = NULL;
t->sizearray = 0;
t->lastfree = 0;
t->lsizenode = 0;
t->readonly = 0;
t->safeenv = 0;
t->nodemask8 = 0;
t->node = cast_to(LuaNode*, dummynode);
if (narray > 0)
setarrayvector(L, t, narray);
if (nhash > 0)
setnodevector(L, t, nhash);
return t;
}
void luaH_free(lua_State* L, Table* t, lua_Page* page)
{
if (t->node != dummynode)
luaM_freearray(L, t->node, sizenode(t), LuaNode, t->memcat);
if (t->array)
luaM_freearray(L, t->array, t->sizearray, TValue, t->memcat);
luaM_freegco(L, t, sizeof(Table), t->memcat, page);
}
static LuaNode* getfreepos(Table* t)
{
while (t->lastfree > 0)
{
t->lastfree--;
LuaNode* n = gnode(t, t->lastfree);
if (ttisnil(gkey(n)))
return n;
}
return NULL; // could not find a free place
}
/*
** inserts a new key into a hash table; first, check whether key's main
** position is free. If not, check whether colliding node is in its main
** position or not: if it is not, move colliding node to an empty place and
** put new key in its main position; otherwise (colliding node is in its main
** position), new key goes to an empty position.
*/
static TValue* newkey(lua_State* L, Table* t, const TValue* key)
{
// enforce boundary invariant
if (ttisnumber(key) && nvalue(key) == t->sizearray + 1)
{
rehash(L, t, key); // grow table
// after rehash, numeric keys might be located in the new array part, but won't be found in the node part
return arrayornewkey(L, t, key);
}
LuaNode* mp = mainposition(t, key);
if (!ttisnil(gval(mp)) || mp == dummynode)
{
LuaNode* n = getfreepos(t); // get a free place
if (n == NULL)
{ // cannot find a free place?
rehash(L, t, key); // grow table
// after rehash, numeric keys might be located in the new array part, but won't be found in the node part
return arrayornewkey(L, t, key);
}
LUAU_ASSERT(n != dummynode);
TValue mk;
getnodekey(L, &mk, mp);
LuaNode* othern = mainposition(t, &mk);
if (othern != mp)
{ // is colliding node out of its main position?
// yes; move colliding node into free position
while (othern + gnext(othern) != mp)
othern += gnext(othern); // find previous
gnext(othern) = cast_int(n - othern); // redo the chain with `n' in place of `mp'
*n = *mp; // copy colliding node into free pos. (mp->next also goes)
if (gnext(mp) != 0)
{
gnext(n) += cast_int(mp - n); // correct 'next'
gnext(mp) = 0; // now 'mp' is free
}
setnilvalue(gval(mp));
}
else
{ // colliding node is in its own main position
// new node will go into free position
if (gnext(mp) != 0)
gnext(n) = cast_int((mp + gnext(mp)) - n); // chain new position
else
LUAU_ASSERT(gnext(n) == 0);
gnext(mp) = cast_int(n - mp);
mp = n;
}
}
setnodekey(L, mp, key);
luaC_barriert(L, t, key);
LUAU_ASSERT(ttisnil(gval(mp)));
return gval(mp);
}
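// Collision walkthrough (an added note): suppose key B hashes to a node mp already holding key A.
// If mp is not A's own main position (A was displaced by an earlier collision), A is moved to a
// free node and B takes mp outright; otherwise B goes to a free node that is linked into A's
// chain via the relative gnext offsets. Either way the invariant holds: every key is either in
// its main position or reachable from it, which keeps chains short even at 100% load.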
/*
** search function for integers
*/
const TValue* luaH_getnum(Table* t, int key)
{
// (1 <= key && key <= t->sizearray)
if (cast_to(unsigned int, key - 1) < cast_to(unsigned int, t->sizearray))
return &t->array[key - 1];
else if (t->node != dummynode)
{
double nk = cast_num(key);
LuaNode* n = hashnum(t, nk);
for (;;)
{ // check whether `key' is somewhere in the chain
if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk))
return gval(n); // that's it
if (gnext(n) == 0)
break;
n += gnext(n);
}
return luaO_nilobject;
}
else
return luaO_nilobject;
}
/*
** search function for strings
*/
const TValue* luaH_getstr(Table* t, TString* key)
{
LuaNode* n = hashstr(t, key);
for (;;)
{ // check whether `key' is somewhere in the chain
if (ttisstring(gkey(n)) && tsvalue(gkey(n)) == key)
return gval(n); // that's it
if (gnext(n) == 0)
break;
n += gnext(n);
}
return luaO_nilobject;
}
/*
** main search function
*/
const TValue* luaH_get(Table* t, const TValue* key)
{
switch (ttype(key))
{
case LUA_TNIL:
return luaO_nilobject;
case LUA_TSTRING:
return luaH_getstr(t, tsvalue(key));
case LUA_TNUMBER:
{
int k;
double n = nvalue(key);
luai_num2int(k, n);
if (luai_numeq(cast_num(k), nvalue(key))) // index is int?
return luaH_getnum(t, k); // use specialized version
// else fall through to the generic lookup below
}
default:
{
LuaNode* n = mainposition(t, key);
for (;;)
{ // check whether `key' is somewhere in the chain
if (luaO_rawequalKey(gkey(n), key))
return gval(n); // that's it
if (gnext(n) == 0)
break;
n += gnext(n);
}
return luaO_nilobject;
}
}
}
TValue* luaH_set(lua_State* L, Table* t, const TValue* key)
{
const TValue* p = luaH_get(t, key);
invalidateTMcache(t);
if (p != luaO_nilobject)
return cast_to(TValue*, p);
else
return luaH_newkey(L, t, key);
}
TValue* luaH_newkey(lua_State* L, Table* t, const TValue* key)
{
if (ttisnil(key))
luaG_runerror(L, "table index is nil");
else if (ttisnumber(key) && luai_numisnan(nvalue(key)))
luaG_runerror(L, "table index is NaN");
else if (ttisvector(key) && luai_vecisnan(vvalue(key)))
luaG_runerror(L, "table index contains NaN");
return newkey(L, t, key);
}
TValue* luaH_setnum(lua_State* L, Table* t, int key)
{
// (1 <= key && key <= t->sizearray)
if (cast_to(unsigned int, key - 1) < cast_to(unsigned int, t->sizearray))
return &t->array[key - 1];
// hash fallback
const TValue* p = luaH_getnum(t, key);
if (p != luaO_nilobject)
return cast_to(TValue*, p);
else
{
TValue k;
setnvalue(&k, cast_num(key));
return newkey(L, t, &k);
}
}
TValue* luaH_setstr(lua_State* L, Table* t, TString* key)
{
const TValue* p = luaH_getstr(t, key);
invalidateTMcache(t);
if (p != luaO_nilobject)
return cast_to(TValue*, p);
else
{
TValue k;
setsvalue(L, &k, key);
return newkey(L, t, &k);
}
}
static int updateaboundary(Table* t, int boundary)
{
if (boundary < t->sizearray && ttisnil(&t->array[boundary - 1]))
{
if (boundary >= 2 && !ttisnil(&t->array[boundary - 2]))
{
maybesetaboundary(t, boundary - 1);
return boundary - 1;
}
}
else if (boundary + 1 < t->sizearray && !ttisnil(&t->array[boundary]) && ttisnil(&t->array[boundary + 1]))
{
maybesetaboundary(t, boundary + 1);
return boundary + 1;
}
return 0;
}
/*
** Try to find a boundary in table `t'. A `boundary' is an integer index
** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
*/
int luaH_getn(Table* t)
{
int boundary = getaboundary(t);
if (boundary > 0)
{
if (!ttisnil(&t->array[t->sizearray - 1]) && t->node == dummynode)
return t->sizearray; // fast-path: the end of the array in `t' already refers to a boundary
if (boundary < t->sizearray && !ttisnil(&t->array[boundary - 1]) && ttisnil(&t->array[boundary]))
return boundary; // fast-path: boundary already refers to a boundary in `t'
int foundboundary = updateaboundary(t, boundary);
if (foundboundary > 0)
return foundboundary;
}
int j = t->sizearray;
if (j > 0 && ttisnil(&t->array[j - 1]))
{
// "branchless" binary search from Array Layouts for Comparison-Based Searching, Paul Khuong, Pat Morin, 2017.
// note that clang is cmov-shy on cmovs around memory operands, so it will compile this to a branchy loop.
TValue* base = t->array;
int rest = j;
while (int half = rest >> 1)
{
base = ttisnil(&base[half]) ? base : base + half;
rest -= half;
}
int boundary = !ttisnil(base) + int(base - t->array);
maybesetaboundary(t, boundary);
return boundary;
}
else
{
// validate boundary invariant
LUAU_ASSERT(t->node == dummynode || ttisnil(luaH_getnum(t, j + 1)));
return j;
}
}
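// A minimal standalone sketch of the same branchless lower-bound scheme (an added illustration,
// not part of the original source), over a plain int array where a negative entry stands in for
// nil; it assumes a[n - 1] is "nil", mirroring the guard above.
static int boundary_sketch(const int* a, int n)
{
    const int* base = a;
    int rest = n;
    while (int half = rest >> 1)
    {
        base = (base[half] < 0) ? base : base + half; // advance only past non-nil probes
        rest -= half;
    }
    // +1 if the element we stopped on is itself non-nil
    return (*base >= 0) + int(base - a);
}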
Table* luaH_clone(lua_State* L, Table* tt)
{
Table* t = luaM_newgco(L, Table, sizeof(Table), L->activememcat);
luaC_init(L, t, LUA_TTABLE);
t->metatable = tt->metatable;
t->tmcache = tt->tmcache;
t->array = NULL;
t->sizearray = 0;
t->lsizenode = 0;
t->nodemask8 = 0;
t->readonly = 0;
t->safeenv = 0;
t->node = cast_to(LuaNode*, dummynode);
t->lastfree = 0;
if (tt->sizearray)
{
t->array = luaM_newarray(L, tt->sizearray, TValue, t->memcat);
maybesetaboundary(t, getaboundary(tt));
t->sizearray = tt->sizearray;
memcpy(t->array, tt->array, t->sizearray * sizeof(TValue));
}
if (tt->node != dummynode)
{
int size = 1 << tt->lsizenode;
t->node = luaM_newarray(L, size, LuaNode, t->memcat);
t->lsizenode = tt->lsizenode;
t->nodemask8 = tt->nodemask8;
memcpy(t->node, tt->node, size * sizeof(LuaNode));
t->lastfree = tt->lastfree;
}
return t;
}
void luaH_clear(Table* tt)
{
// clear array part
for (int i = 0; i < tt->sizearray; ++i)
{
setnilvalue(&tt->array[i]);
}
maybesetaboundary(tt, 0);
// clear hash part
if (tt->node != dummynode)
{
int size = sizenode(tt);
tt->lastfree = size;
for (int i = 0; i < size; ++i)
{
LuaNode* n = gnode(tt, i);
setnilvalue(gkey(n));
setnilvalue(gval(n));
gnext(n) = 0;
}
}
// back to empty -> no tag methods present
tt->tmcache = cast_byte(~0);
}

View File

@ -1,35 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
#define gnode(t, i) (&(t)->node[i])
#define gkey(n) (&(n)->key)
#define gval(n) (&(n)->val)
#define gnext(n) ((n)->key.next)
#define gval2slot(t, v) int(cast_to(LuaNode*, static_cast<const TValue*>(v)) - t->node)
// reset cache of absent metamethods, cache is updated in luaT_gettm
#define invalidateTMcache(t) t->tmcache = 0
LUAI_FUNC const TValue* luaH_getnum(Table* t, int key);
LUAI_FUNC TValue* luaH_setnum(lua_State* L, Table* t, int key);
LUAI_FUNC const TValue* luaH_getstr(Table* t, TString* key);
LUAI_FUNC TValue* luaH_setstr(lua_State* L, Table* t, TString* key);
LUAI_FUNC const TValue* luaH_get(Table* t, const TValue* key);
LUAI_FUNC TValue* luaH_set(lua_State* L, Table* t, const TValue* key);
LUAI_FUNC TValue* luaH_newkey(lua_State* L, Table* t, const TValue* key);
LUAI_FUNC Table* luaH_new(lua_State* L, int narray, int lnhash);
LUAI_FUNC void luaH_resizearray(lua_State* L, Table* t, int nasize);
LUAI_FUNC void luaH_resizehash(lua_State* L, Table* t, int nhsize);
LUAI_FUNC void luaH_free(lua_State* L, Table* t, struct lua_Page* page);
LUAI_FUNC int luaH_next(lua_State* L, Table* t, StkId key);
LUAI_FUNC int luaH_getn(Table* t);
LUAI_FUNC Table* luaH_clone(lua_State* L, Table* tt);
LUAI_FUNC void luaH_clear(Table* tt);
#define luaH_setslot(L, t, slot, key) (invalidateTMcache(t), (slot == luaO_nilobject ? luaH_newkey(L, t, key) : cast_to(TValue*, slot)))
extern const LuaNode luaH_dummynode;

View File

@ -1,606 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lapi.h"
#include "lstate.h"
#include "ltable.h"
#include "lstring.h"
#include "lgc.h"
#include "ldebug.h"
#include "lvm.h"
static int foreachi(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
luaL_checktype(L, 2, LUA_TFUNCTION);
int i;
int n = lua_objlen(L, 1);
for (i = 1; i <= n; i++)
{
lua_pushvalue(L, 2); // function
lua_pushinteger(L, i); // 1st argument
lua_rawgeti(L, 1, i); // 2nd argument
lua_call(L, 2, 1);
if (!lua_isnil(L, -1))
return 1;
lua_pop(L, 1); // remove nil result
}
return 0;
}
static int foreach (lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
luaL_checktype(L, 2, LUA_TFUNCTION);
lua_pushnil(L); // first key
while (lua_next(L, 1))
{
lua_pushvalue(L, 2); // function
lua_pushvalue(L, -3); // key
lua_pushvalue(L, -3); // value
lua_call(L, 2, 1);
if (!lua_isnil(L, -1))
return 1;
lua_pop(L, 2); // remove value and result
}
return 0;
}
static int maxn(lua_State* L)
{
double max = 0;
luaL_checktype(L, 1, LUA_TTABLE);
lua_pushnil(L); // first key
while (lua_next(L, 1))
{
lua_pop(L, 1); // remove value
if (lua_type(L, -1) == LUA_TNUMBER)
{
double v = lua_tonumber(L, -1);
if (v > max)
max = v;
}
}
lua_pushnumber(L, max);
return 1;
}
static int getn(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
lua_pushinteger(L, lua_objlen(L, 1));
return 1;
}
static void moveelements(lua_State* L, int srct, int dstt, int f, int e, int t)
{
Table* src = hvalue(L->base + (srct - 1));
Table* dst = hvalue(L->base + (dstt - 1));
if (dst->readonly)
luaG_readonlyerror(L);
int n = e - f + 1; // number of elements to move
if (cast_to(unsigned int, f - 1) < cast_to(unsigned int, src->sizearray) &&
cast_to(unsigned int, t - 1) < cast_to(unsigned int, dst->sizearray) &&
cast_to(unsigned int, f - 1 + n) <= cast_to(unsigned int, src->sizearray) &&
cast_to(unsigned int, t - 1 + n) <= cast_to(unsigned int, dst->sizearray))
{
TValue* srcarray = src->array;
TValue* dstarray = dst->array;
if (t > e || t <= f || (dstt != srct && dst != src))
{
for (int i = 0; i < n; ++i)
{
TValue* s = &srcarray[f + i - 1];
TValue* d = &dstarray[t + i - 1];
setobj2t(L, d, s);
}
}
else
{
for (int i = n - 1; i >= 0; i--)
{
TValue* s = &srcarray[(f + i) - 1];
TValue* d = &dstarray[(t + i) - 1];
setobj2t(L, d, s);
}
}
luaC_barrierfast(L, dst);
}
else
{
if (t > e || t <= f || dst != src)
{
for (int i = 0; i < n; ++i)
{
lua_rawgeti(L, srct, f + i);
lua_rawseti(L, dstt, t + i);
}
}
else
{
for (int i = n - 1; i >= 0; i--)
{
lua_rawgeti(L, srct, f + i);
lua_rawseti(L, dstt, t + i);
}
}
}
}
static int tinsert(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
int n = lua_objlen(L, 1);
int pos; // where to insert new element
switch (lua_gettop(L))
{
case 2:
{ // called with only 2 arguments
pos = n + 1; // insert new element at the end
break;
}
case 3:
{
pos = luaL_checkinteger(L, 2); // 2nd argument is the position
// move up elements if necessary
if (1 <= pos && pos <= n)
moveelements(L, 1, 1, pos, n, pos + 1);
break;
}
default:
{
luaL_error(L, "wrong number of arguments to 'insert'");
}
}
lua_rawseti(L, 1, pos); // t[pos] = v
return 0;
}
static int tremove(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
int n = lua_objlen(L, 1);
int pos = luaL_optinteger(L, 2, n);
if (!(1 <= pos && pos <= n)) // position is outside bounds?
return 0; // nothing to remove
lua_rawgeti(L, 1, pos); // result = t[pos]
moveelements(L, 1, 1, pos + 1, n, pos);
lua_pushnil(L);
lua_rawseti(L, 1, n); // t[n] = nil
return 1;
}
/*
** Copy elements (a[f], ..., a[e]) from the table at stack index 1 into
** (tt[t], tt[t+1], ...). Whenever possible, copy in increasing order,
** which is better for rehashing. "possible" means the destination starts
** after the original range, or at or before its start, or is another table.
*/
static int tmove(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
int f = luaL_checkinteger(L, 2);
int e = luaL_checkinteger(L, 3);
int t = luaL_checkinteger(L, 4);
int tt = !lua_isnoneornil(L, 5) ? 5 : 1; // destination table
luaL_checktype(L, tt, LUA_TTABLE);
if (e >= f)
{ // otherwise, nothing to move
luaL_argcheck(L, f > 0 || e < INT_MAX + f, 3, "too many elements to move");
int n = e - f + 1; // number of elements to move
luaL_argcheck(L, t <= INT_MAX - n + 1, 4, "destination wrap around");
Table* dst = hvalue(L->base + (tt - 1));
if (dst->readonly) // also checked in moveelements, but this blocks resizes of r/o tables
luaG_readonlyerror(L);
if (t > 0 && (t - 1) <= dst->sizearray && (t - 1 + n) > dst->sizearray)
{ // grow the destination table array
luaH_resizearray(L, dst, t - 1 + n);
}
moveelements(L, 1, tt, f, e, t);
}
lua_pushvalue(L, tt); // return destination table
return 1;
}
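// Usage illustration (an added note): table.move(t, 2, 4, 1) shifts t[2..4] down a slot; since
// the destination starts at or before the source (t <= f), moveelements copies in increasing
// order. table.move(t, 1, 3, 2) overlaps the other way (f < t <= e, same table), so the
// decreasing-order loop is used to avoid overwriting elements before they are read.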
static void addfield(lua_State* L, luaL_Buffer* b, int i)
{
lua_rawgeti(L, 1, i);
if (!lua_isstring(L, -1))
luaL_error(L, "invalid value (%s) at index %d in table for 'concat'", luaL_typename(L, -1), i);
luaL_addvalue(b);
}
static int tconcat(lua_State* L)
{
luaL_Buffer b;
size_t lsep;
int i, last;
const char* sep = luaL_optlstring(L, 2, "", &lsep);
luaL_checktype(L, 1, LUA_TTABLE);
i = luaL_optinteger(L, 3, 1);
last = luaL_opt(L, luaL_checkinteger, 4, lua_objlen(L, 1));
luaL_buffinit(L, &b);
for (; i < last; i++)
{
addfield(L, &b, i);
luaL_addlstring(&b, sep, lsep, -1);
}
if (i == last) // add last value (if interval was not empty)
addfield(L, &b, i);
luaL_pushresult(&b);
return 1;
}
static int tpack(lua_State* L)
{
int n = lua_gettop(L); // number of elements to pack
lua_createtable(L, n, 1); // create result table
Table* t = hvalue(L->top - 1);
for (int i = 0; i < n; ++i)
{
TValue* e = &t->array[i];
setobj2t(L, e, L->base + i);
}
// t.n = number of elements
TValue* nv = luaH_setstr(L, t, luaS_newliteral(L, "n"));
setnvalue(nv, n);
return 1; // return table
}
static int tunpack(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
Table* t = hvalue(L->base);
int i = luaL_optinteger(L, 2, 1);
int e = luaL_opt(L, luaL_checkinteger, 3, lua_objlen(L, 1));
if (i > e)
return 0; // empty range
unsigned n = (unsigned)e - i; // number of elements minus 1 (avoid overflows)
if (n >= (unsigned int)INT_MAX || !lua_checkstack(L, (int)(++n)))
luaL_error(L, "too many results to unpack");
// fast-path: direct array-to-stack copy
if (i == 1 && int(n) <= t->sizearray)
{
for (i = 0; i < int(n); i++)
setobj2s(L, L->top + i, &t->array[i]);
L->top += n;
}
else
{
// push arg[i..e - 1] (to avoid overflows)
for (; i < e; i++)
lua_rawgeti(L, 1, i);
lua_rawgeti(L, 1, e); // push last element
}
return (int)n;
}
typedef int (*SortPredicate)(lua_State* L, const TValue* l, const TValue* r);
static int sort_func(lua_State* L, const TValue* l, const TValue* r)
{
LUAU_ASSERT(L->top == L->base + 2); // table, function
setobj2s(L, L->top, &L->base[1]);
setobj2s(L, L->top + 1, l);
setobj2s(L, L->top + 2, r);
L->top += 3; // safe because of LUA_MINSTACK guarantee
luaD_call(L, L->top - 3, 1);
L->top -= 1; // maintain stack depth
return !l_isfalse(L->top);
}
inline void sort_swap(lua_State* L, Table* t, int i, int j)
{
TValue* arr = t->array;
int n = t->sizearray;
LUAU_ASSERT(unsigned(i) < unsigned(n) && unsigned(j) < unsigned(n)); // contract maintained in sort_less after predicate call
// no barrier required because both elements are in the array before and after the swap
TValue temp;
setobj2s(L, &temp, &arr[i]);
setobj2t(L, &arr[i], &arr[j]);
setobj2t(L, &arr[j], &temp);
}
inline int sort_less(lua_State* L, Table* t, int i, int j, SortPredicate pred)
{
TValue* arr = t->array;
int n = t->sizearray;
LUAU_ASSERT(unsigned(i) < unsigned(n) && unsigned(j) < unsigned(n)); // contract maintained in sort_less after predicate call
int res = pred(L, &arr[i], &arr[j]);
// predicate call may resize the table, which is invalid
if (t->sizearray != n)
luaL_error(L, "table modified during sorting");
return res;
}
static void sort_siftheap(lua_State* L, Table* t, int l, int u, SortPredicate pred, int root)
{
LUAU_ASSERT(l <= u);
int count = u - l + 1;
// process all elements with two children
while (root * 2 + 2 < count)
{
int left = root * 2 + 1, right = root * 2 + 2;
int next = root;
next = sort_less(L, t, l + next, l + left, pred) ? left : next;
next = sort_less(L, t, l + next, l + right, pred) ? right : next;
if (next == root)
break;
sort_swap(L, t, l + root, l + next);
root = next;
}
// process last element if it has just one child
int lastleft = root * 2 + 1;
if (lastleft == count - 1 && sort_less(L, t, l + root, l + lastleft, pred))
sort_swap(L, t, l + root, l + lastleft);
}
static void sort_heap(lua_State* L, Table* t, int l, int u, SortPredicate pred)
{
LUAU_ASSERT(l <= u);
int count = u - l + 1;
for (int i = count / 2 - 1; i >= 0; --i)
sort_siftheap(L, t, l, u, pred, i);
for (int i = count - 1; i > 0; --i)
{
sort_swap(L, t, l, l + i);
sort_siftheap(L, t, l, l + i - 1, pred, 0);
}
}
static void sort_rec(lua_State* L, Table* t, int l, int u, int limit, SortPredicate pred)
{
// sort range [l..u] (inclusive, 0-based)
while (l < u)
{
// if the limit has been reached, quick sort is going over the permitted nlogn complexity, so we fall back to heap sort
if (limit == 0)
return sort_heap(L, t, l, u, pred);
// sort elements a[l], a[(l+u)/2] and a[u]
// note: this simultaneously acts as a small sort and a median selector
if (sort_less(L, t, u, l, pred)) // a[u] < a[l]?
sort_swap(L, t, u, l); // swap a[l] - a[u]
if (u - l == 1)
break; // only 2 elements
int m = l + ((u - l) >> 1); // midpoint
if (sort_less(L, t, m, l, pred)) // a[m]<a[l]?
sort_swap(L, t, m, l);
else if (sort_less(L, t, u, m, pred)) // a[u]<a[m]?
sort_swap(L, t, m, u);
if (u - l == 2)
break; // only 3 elements
// here l, m, u are ordered; m will become the new pivot
int p = u - 1;
sort_swap(L, t, m, u - 1); // pivot is now (and always) at u-1
// a[l] <= P == a[u-1] <= a[u], only need to sort from l+1 to u-2
int i = l;
int j = u - 1;
for (;;)
{ // invariant: a[l..i] <= P <= a[j..u]
// repeat ++i until a[i] >= P
while (sort_less(L, t, ++i, p, pred))
{
if (i >= u)
luaL_error(L, "invalid order function for sorting");
}
// repeat --j until a[j] <= P
while (sort_less(L, t, p, --j, pred))
{
if (j <= l)
luaL_error(L, "invalid order function for sorting");
}
if (j < i)
break;
sort_swap(L, t, i, j);
}
// swap pivot a[p] with a[i], which is the new midpoint
sort_swap(L, t, p, i);
// adjust limit to allow 1.5 log2N recursive steps
limit = (limit >> 1) + (limit >> 2);
// a[l..i-1] <= a[i] == P <= a[i+1..u]
// sort smaller half recursively; the larger half is sorted in the next loop iteration
if (i - l < u - i)
{
sort_rec(L, t, l, i - 1, limit, pred);
l = i + 1;
}
else
{
sort_rec(L, t, i + 1, u, limit, pred);
u = i - 1;
}
}
}
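// Added note: this is essentially an introsort. The recursion budget shrinks to 3/4 of its value
// on every partition (limit >> 1 plus limit >> 2), and once it hits zero the range is finished
// with heapsort, so the total work stays O(n log n) even when a hostile predicate produces
// consistently bad pivots.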
static int tsort(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
Table* t = hvalue(L->base);
int n = luaH_getn(t);
if (t->readonly)
luaG_readonlyerror(L);
SortPredicate pred = luaV_lessthan;
if (!lua_isnoneornil(L, 2)) // is there a 2nd argument?
{
luaL_checktype(L, 2, LUA_TFUNCTION);
pred = sort_func;
}
lua_settop(L, 2); // make sure there are two arguments
if (n > 0)
sort_rec(L, t, 0, n - 1, n, pred);
return 0;
}
static int tcreate(lua_State* L)
{
int size = luaL_checkinteger(L, 1);
if (size < 0)
luaL_argerror(L, 1, "size out of range");
if (!lua_isnoneornil(L, 2))
{
lua_createtable(L, size, 0);
Table* t = hvalue(L->top - 1);
StkId v = L->base + 1;
for (int i = 0; i < size; ++i)
{
TValue* e = &t->array[i];
setobj2t(L, e, v);
}
}
else
{
lua_createtable(L, size, 0);
}
return 1;
}
static int tfind(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
luaL_checkany(L, 2);
int init = luaL_optinteger(L, 3, 1);
if (init < 1)
luaL_argerror(L, 3, "index out of range");
Table* t = hvalue(L->base);
StkId v = L->base + 1;
for (int i = init;; ++i)
{
const TValue* e = luaH_getnum(t, i);
if (ttisnil(e))
break;
if (equalobj(L, v, e))
{
lua_pushinteger(L, i);
return 1;
}
}
lua_pushnil(L);
return 1;
}
static int tclear(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
Table* tt = hvalue(L->base);
if (tt->readonly)
luaG_readonlyerror(L);
luaH_clear(tt);
return 0;
}
static int tfreeze(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
luaL_argcheck(L, !lua_getreadonly(L, 1), 1, "table is already frozen");
luaL_argcheck(L, !luaL_getmetafield(L, 1, "__metatable"), 1, "table has a protected metatable");
lua_setreadonly(L, 1, true);
lua_pushvalue(L, 1);
return 1;
}
static int tisfrozen(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
lua_pushboolean(L, lua_getreadonly(L, 1));
return 1;
}
static int tclone(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
luaL_argcheck(L, !luaL_getmetafield(L, 1, "__metatable"), 1, "table has a protected metatable");
Table* tt = luaH_clone(L, hvalue(L->base));
TValue v;
sethvalue(L, &v, tt);
luaA_pushobject(L, &v);
return 1;
}
static const luaL_Reg tab_funcs[] = {
{"concat", tconcat},
{"foreach", foreach},
{"foreachi", foreachi},
{"getn", getn},
{"maxn", maxn},
{"insert", tinsert},
{"remove", tremove},
{"sort", tsort},
{"pack", tpack},
{"unpack", tunpack},
{"move", tmove},
{"create", tcreate},
{"find", tfind},
{"clear", tclear},
{"freeze", tfreeze},
{"isfrozen", tisfrozen},
{"clone", tclone},
{NULL, NULL},
};
int luaopen_table(lua_State* L)
{
luaL_register(L, LUA_TABLIBNAME, tab_funcs);
// Lua 5.1 compat
lua_pushcfunction(L, tunpack, "unpack");
lua_setglobal(L, "unpack");
return 1;
}

View File

@ -1,144 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "ltm.h"
#include "lstate.h"
#include "lstring.h"
#include "ludata.h"
#include "ltable.h"
#include "lgc.h"
#include <string.h>
// clang-format off
const char* const luaT_typenames[] = {
// ORDER TYPE
"nil",
"boolean",
"userdata",
"number",
"vector",
"string",
"table",
"function",
"userdata",
"thread",
};
const char* const luaT_eventname[] = {
// ORDER TM
"__index",
"__newindex",
"__mode",
"__namecall",
"__call",
"__iter",
"__len",
"__eq",
"__add",
"__sub",
"__mul",
"__div",
"__mod",
"__pow",
"__unm",
"__lt",
"__le",
"__concat",
"__type",
"__metatable",
};
// clang-format on
static_assert(sizeof(luaT_typenames) / sizeof(luaT_typenames[0]) == LUA_T_COUNT, "luaT_typenames size mismatch");
static_assert(sizeof(luaT_eventname) / sizeof(luaT_eventname[0]) == TM_N, "luaT_eventname size mismatch");
static_assert(TM_EQ < 8, "fasttm optimization stores a bitfield with metamethods in a byte");
void luaT_init(lua_State* L)
{
int i;
for (i = 0; i < LUA_T_COUNT; i++)
{
L->global->ttname[i] = luaS_new(L, luaT_typenames[i]);
luaS_fix(L->global->ttname[i]); // never collect these names
}
for (i = 0; i < TM_N; i++)
{
L->global->tmname[i] = luaS_new(L, luaT_eventname[i]);
luaS_fix(L->global->tmname[i]); // never collect these names
}
}
/*
** function to be used with macro "fasttm": optimized for absence of
** tag methods.
*/
const TValue* luaT_gettm(Table* events, TMS event, TString* ename)
{
const TValue* tm = luaH_getstr(events, ename);
LUAU_ASSERT(event <= TM_EQ);
if (ttisnil(tm))
{ // no tag method?
events->tmcache |= cast_byte(1u << event); // cache this fact
return NULL;
}
else
return tm;
}
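// Cache illustration (an added note): tmcache is a per-table byte covering the first eight tag
// methods (TM_INDEX..TM_EQ, see the static_assert above). A set bit records a confirmed absence,
// letting fasttm() answer "no metamethod" without a table lookup; invalidateTMcache resets the
// byte to 0 ("unknown") whenever the table is written to, since it may be serving as a metatable.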
const TValue* luaT_gettmbyobj(lua_State* L, const TValue* o, TMS event)
{
/*
NB: Tag-methods were replaced by meta-methods in Lua 5.0, but the
old names are still around (this function, for example).
*/
Table* mt;
switch (ttype(o))
{
case LUA_TTABLE:
mt = hvalue(o)->metatable;
break;
case LUA_TUSERDATA:
mt = uvalue(o)->metatable;
break;
default:
mt = L->global->mt[ttype(o)];
}
return (mt ? luaH_getstr(mt, L->global->tmname[event]) : luaO_nilobject);
}
const TString* luaT_objtypenamestr(lua_State* L, const TValue* o)
{
if (ttisuserdata(o) && uvalue(o)->tag != UTAG_PROXY && uvalue(o)->metatable)
{
const TValue* type = luaH_getstr(uvalue(o)->metatable, L->global->tmname[TM_TYPE]);
if (ttisstring(type))
return tsvalue(type);
}
else if (Table* mt = L->global->mt[ttype(o)])
{
const TValue* type = luaH_getstr(mt, L->global->tmname[TM_TYPE]);
if (ttisstring(type))
return tsvalue(type);
}
return L->global->ttname[ttype(o)];
}
const char* luaT_objtypename(lua_State* L, const TValue* o)
{
return getstr(luaT_objtypenamestr(L, o));
}

View File

@ -1,59 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
/*
* WARNING: if you change the order of this enumeration,
* grep "ORDER TM"
*/
// clang-format off
typedef enum
{
TM_INDEX,
TM_NEWINDEX,
TM_MODE,
TM_NAMECALL,
TM_CALL,
TM_ITER,
TM_LEN,
TM_EQ, // last tag method with `fast' access
TM_ADD,
TM_SUB,
TM_MUL,
TM_DIV,
TM_MOD,
TM_POW,
TM_UNM,
TM_LT,
TM_LE,
TM_CONCAT,
TM_TYPE,
TM_METATABLE,
TM_N // number of elements in the enum
} TMS;
// clang-format on
#define gfasttm(g, et, e) ((et) == NULL ? NULL : ((et)->tmcache & (1u << (e))) ? NULL : luaT_gettm(et, e, (g)->tmname[e]))
#define fasttm(l, et, e) gfasttm(l->global, et, e)
#define fastnotm(et, e) ((et) == NULL || ((et)->tmcache & (1u << (e))))
LUAI_DATA const char* const luaT_typenames[];
LUAI_DATA const char* const luaT_eventname[];
LUAI_FUNC const TValue* luaT_gettm(Table* events, TMS event, TString* ename);
LUAI_FUNC const TValue* luaT_gettmbyobj(lua_State* L, const TValue* o, TMS event);
LUAI_FUNC const TString* luaT_objtypenamestr(lua_State* L, const TValue* o);
LUAI_FUNC const char* luaT_objtypename(lua_State* L, const TValue* o);
LUAI_FUNC void luaT_init(lua_State* L);

View File

@ -1,43 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "ludata.h"
#include "lgc.h"
#include "lmem.h"
#include <string.h>
Udata* luaU_newudata(lua_State* L, size_t s, int tag)
{
if (s > INT_MAX - sizeof(Udata))
luaM_toobig(L);
Udata* u = luaM_newgco(L, Udata, sizeudata(s), L->activememcat);
luaC_init(L, u, LUA_TUSERDATA);
u->len = int(s);
u->metatable = NULL;
LUAU_ASSERT(tag >= 0 && tag <= 255);
u->tag = uint8_t(tag);
return u;
}
void luaU_freeudata(lua_State* L, Udata* u, lua_Page* page)
{
if (u->tag < LUA_UTAG_LIMIT)
{
lua_Destructor dtor = L->global->udatagc[u->tag];
// TODO: access to L here is highly unsafe since this is called during internal GC traversal
// certain operations such as lua_getthreaddata are okay, but by and large this risks crashes on improper use
if (dtor)
dtor(L, u->data);
}
else if (u->tag == UTAG_IDTOR)
{
void (*dtor)(void*) = nullptr;
memcpy(&dtor, &u->data + u->len - sizeof(dtor), sizeof(dtor));
if (dtor)
dtor(u->data);
}
luaM_freegco(L, u, sizeudata(u->len), u->memcat, page);
}
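// Layout note (an added illustration): a UTAG_IDTOR userdata embeds its destructor inline by
// reserving sizeof(void (*)(void*)) bytes at the tail of the payload; the memcpy above recovers
// that pointer at collection time, so no per-tag destructor registration in the global state is
// needed for this kind of userdata.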

View File

@ -1,16 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
// special tag value is used for user data with inline dtors
#define UTAG_IDTOR LUA_UTAG_LIMIT
// special tag value is used for newproxy-created user data (all other user data objects are host-exposed)
#define UTAG_PROXY (LUA_UTAG_LIMIT + 1)
#define sizeudata(len) (offsetof(Udata, data) + len)
LUAI_FUNC Udata* luaU_newudata(lua_State* L, size_t s, int tag);
LUAI_FUNC void luaU_freeudata(lua_State* L, Udata* u, struct lua_Page* page);

View File

@ -1,294 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lualib.h"
#include "lcommon.h"
#define MAXUNICODE 0x10FFFF
#define iscont(p) ((*(p)&0xC0) == 0x80)
// from strlib
// translate a relative string position: negative means back from end
static int u_posrelat(int pos, size_t len)
{
if (pos >= 0)
return pos;
else if (0u - (size_t)pos > len)
return 0;
else
return (int)len + pos + 1;
}
/*
** Decode one UTF-8 sequence, returning NULL if byte sequence is invalid.
*/
static const char* utf8_decode(const char* o, int* val)
{
static const unsigned int limits[] = {0xFF, 0x7F, 0x7FF, 0xFFFF};
const unsigned char* s = (const unsigned char*)o;
unsigned int c = s[0];
unsigned int res = 0; // final result
if (c < 0x80) // ascii?
res = c;
else
{
int count = 0; // to count number of continuation bytes
while (c & 0x40)
{ // still have continuation bytes?
int cc = s[++count]; // read next byte
if ((cc & 0xC0) != 0x80) // not a continuation byte?
return NULL; // invalid byte sequence
res = (res << 6) | (cc & 0x3F); // add lower 6 bits from cont. byte
c <<= 1; // to test next bit
}
res |= ((c & 0x7F) << (count * 5)); // add first byte
if (count > 3 || res > MAXUNICODE || res <= limits[count])
return NULL; // invalid byte sequence
s += count; // skip continuation bytes read
}
if (val)
*val = res;
return (const char*)s + 1; // +1 to include first byte
}
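// Worked example (an added illustration): the Euro sign U+20AC is encoded as E2 82 AC. The loop
// consumes two continuation bytes, accumulating res = (0x02 << 6) | 0x2C = 0xAC; the remaining
// first-byte bits then contribute 0x08 << 10 = 0x2000, giving 0x20AC. The limits[2] = 0x7FF
// check rejects over-long encodings of values that would have fit in two bytes.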
/*
** utf8len(s [, i [, j]]) --> number of characters that start in the
** range [i,j], or nil + current position if 's' is not well formed in
** that interval
*/
static int utflen(lua_State* L)
{
int n = 0;
size_t len;
const char* s = luaL_checklstring(L, 1, &len);
int posi = u_posrelat(luaL_optinteger(L, 2, 1), len);
int posj = u_posrelat(luaL_optinteger(L, 3, -1), len);
luaL_argcheck(L, 1 <= posi && --posi <= (int)len, 2, "initial position out of string");
luaL_argcheck(L, --posj < (int)len, 3, "final position out of string");
while (posi <= posj)
{
const char* s1 = utf8_decode(s + posi, NULL);
if (s1 == NULL)
{ // conversion error?
lua_pushnil(L); // return nil ...
lua_pushinteger(L, posi + 1); // ... and current position
return 2;
}
posi = (int)(s1 - s);
n++;
}
lua_pushinteger(L, n);
return 1;
}
/*
** codepoint(s, [i, [j]]) -> returns codepoints for all characters
** that start in the range [i,j]
*/
static int codepoint(lua_State* L)
{
size_t len;
const char* s = luaL_checklstring(L, 1, &len);
int posi = u_posrelat(luaL_optinteger(L, 2, 1), len);
int pose = u_posrelat(luaL_optinteger(L, 3, posi), len);
int n;
const char* se;
luaL_argcheck(L, posi >= 1, 2, "out of range");
luaL_argcheck(L, pose <= (int)len, 3, "out of range");
if (posi > pose)
return 0; // empty interval; return no values
if (pose - posi >= INT_MAX) // (int -> int) overflow?
luaL_error(L, "string slice too long");
n = (int)(pose - posi) + 1;
luaL_checkstack(L, n, "string slice too long");
n = 0;
se = s + pose;
for (s += posi - 1; s < se;)
{
int code;
s = utf8_decode(s, &code);
if (s == NULL)
luaL_error(L, "invalid UTF-8 code");
lua_pushinteger(L, code);
n++;
}
return n;
}
// from Lua 5.3 lobject.h
#define UTF8BUFFSZ 8
// from Lua 5.3 lobject.c, copied verbatim + static
static int luaO_utf8esc(char* buff, unsigned long x)
{
int n = 1; // number of bytes put in buffer (backwards)
LUAU_ASSERT(x <= 0x10FFFF);
if (x < 0x80) // ascii?
buff[UTF8BUFFSZ - 1] = cast_to(char, x);
else
{ // need continuation bytes
unsigned int mfb = 0x3f; // maximum that fits in first byte
do
{ // add continuation bytes
buff[UTF8BUFFSZ - (n++)] = cast_to(char, 0x80 | (x & 0x3f));
x >>= 6; // remove added bits
mfb >>= 1; // now there is one less bit available in first byte
} while (x > mfb); // still needs continuation byte?
buff[UTF8BUFFSZ - n] = cast_to(char, (~mfb << 1) | x); // add first byte
}
return n;
}
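// Worked example (an added illustration): for x = 0x20AC the loop emits 0xAC (0x80 | 0x2C) and
// then 0x82 (0x80 | 0x02) while mfb shrinks 0x3F -> 0x1F -> 0x0F; the first byte becomes
// (~0x0F << 1) | 0x02 = 0xE2, so the buffer tail holds E2 82 AC and n = 3.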
// lighter replacement for pushutfchar; doesn't push any string onto the stack
static int buffutfchar(lua_State* L, int arg, char* buff, const char** charstr)
{
int code = luaL_checkinteger(L, arg);
luaL_argcheck(L, 0 <= code && code <= MAXUNICODE, arg, "value out of range");
int l = luaO_utf8esc(buff, cast_to(long, code));
*charstr = buff + UTF8BUFFSZ - l;
return l;
}
/*
** utfchar(n1, n2, ...) -> char(n1)..char(n2)...
**
** This version avoids the need to make more invasive upgrades elsewhere (like
** implementing the %U escape in lua_pushfstring) and avoids pushing string
** objects for each codepoint in the multi-argument case. -Jovanni
*/
static int utfchar(lua_State* L)
{
char buff[UTF8BUFFSZ];
const char* charstr;
int n = lua_gettop(L); // number of arguments
if (n == 1)
{ // optimize common case of single char
int l = buffutfchar(L, 1, buff, &charstr);
lua_pushlstring(L, charstr, l);
}
else
{
luaL_Buffer b;
luaL_buffinit(L, &b);
for (int i = 1; i <= n; i++)
{
int l = buffutfchar(L, i, buff, &charstr);
luaL_addlstring(&b, charstr, l, -1);
}
luaL_pushresult(&b);
}
return 1;
}
/*
** offset(s, n, [i]) -> index where n-th character counting from
** position 'i' starts; 0 means character at 'i'.
*/
static int byteoffset(lua_State* L)
{
size_t len;
const char* s = luaL_checklstring(L, 1, &len);
int n = luaL_checkinteger(L, 2);
int posi = (n >= 0) ? 1 : (int)len + 1;
posi = u_posrelat(luaL_optinteger(L, 3, posi), len);
luaL_argcheck(L, 1 <= posi && --posi <= (int)len, 3, "position out of range");
if (n == 0)
{
// find beginning of current byte sequence
while (posi > 0 && iscont(s + posi))
posi--;
}
else
{
if (iscont(s + posi))
luaL_error(L, "initial position is a continuation byte");
if (n < 0)
{
while (n < 0 && posi > 0)
{ // move back
do
{ // find beginning of previous character
posi--;
} while (posi > 0 && iscont(s + posi));
n++;
}
}
else
{
n--; // do not move for 1st character
while (n > 0 && posi < (int)len)
{
do
{ // find beginning of next character
posi++;
} while (iscont(s + posi)); // (cannot pass final '\0')
n--;
}
}
}
if (n == 0) // did it find given character?
lua_pushinteger(L, posi + 1);
else // no such character
lua_pushnil(L);
return 1;
}
static int iter_aux(lua_State* L)
{
size_t len;
const char* s = luaL_checklstring(L, 1, &len);
int n = lua_tointeger(L, 2) - 1;
if (n < 0) // first iteration?
n = 0; // start from here
else if (n < (int)len)
{
n++; // skip current byte
while (iscont(s + n))
n++; // and its continuations
}
if (n >= (int)len)
return 0; // no more codepoints
else
{
int code;
const char* next = utf8_decode(s + n, &code);
if (next == NULL || iscont(next))
luaL_error(L, "invalid UTF-8 code");
lua_pushinteger(L, n + 1);
lua_pushinteger(L, code);
return 2;
}
}
static int iter_codes(lua_State* L)
{
luaL_checkstring(L, 1);
lua_pushcfunction(L, iter_aux, NULL);
lua_pushvalue(L, 1);
lua_pushinteger(L, 0);
return 3;
}
// pattern to match a single UTF-8 character
#define UTF8PATT "[\0-\x7F\xC2-\xF4][\x80-\xBF]*"
static const luaL_Reg funcs[] = {
{"offset", byteoffset},
{"codepoint", codepoint},
{"char", utfchar},
{"len", utflen},
{"codes", iter_codes},
{NULL, NULL},
};
int luaopen_utf8(lua_State* L)
{
luaL_register(L, LUA_UTF8LIBNAME, funcs);
lua_pushlstring(L, UTF8PATT, sizeof(UTF8PATT) / sizeof(char) - 1);
lua_setfield(L, -2, "charpattern");
return 1;
}

View File

@ -1,34 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#pragma once
#include "lobject.h"
#include "ltm.h"
#define tostring(L, o) ((ttype(o) == LUA_TSTRING) || (luaV_tostring(L, o)))
#define tonumber(o, n) (ttype(o) == LUA_TNUMBER || (((o) = luaV_tonumber(o, n)) != NULL))
#define equalobj(L, o1, o2) (ttype(o1) == ttype(o2) && luaV_equalval(L, o1, o2))
LUAI_FUNC int luaV_strcmp(const TString* ls, const TString* rs);
LUAI_FUNC int luaV_lessthan(lua_State* L, const TValue* l, const TValue* r);
LUAI_FUNC int luaV_lessequal(lua_State* L, const TValue* l, const TValue* r);
LUAI_FUNC int luaV_equalval(lua_State* L, const TValue* t1, const TValue* t2);
LUAI_FUNC void luaV_doarith(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TMS op);
LUAI_FUNC void luaV_dolen(lua_State* L, StkId ra, const TValue* rb);
LUAI_FUNC const TValue* luaV_tonumber(const TValue* obj, TValue* n);
LUAI_FUNC const float* luaV_tovector(const TValue* obj);
LUAI_FUNC int luaV_tostring(lua_State* L, StkId obj);
LUAI_FUNC void luaV_gettable(lua_State* L, const TValue* t, TValue* key, StkId val);
LUAI_FUNC void luaV_settable(lua_State* L, const TValue* t, TValue* key, StkId val);
LUAI_FUNC void luaV_concat(lua_State* L, int total, int last);
LUAI_FUNC void luaV_getimport(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil);
LUAI_FUNC void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit);
LUAI_FUNC void luaV_callTM(lua_State* L, int nparams, int res);
LUAI_FUNC void luaV_tryfuncTM(lua_State* L, StkId func);
LUAI_FUNC void luau_execute(lua_State* L);
LUAI_FUNC int luau_precall(lua_State* L, struct lua_TValue* func, int nresults);
LUAI_FUNC void luau_poscall(lua_State* L, StkId first);
LUAI_FUNC void luau_callhook(lua_State* L, lua_Hook hook, void* userdata);

File diff suppressed because it is too large Load Diff

View File

@ -1,408 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lvm.h"
#include "lstate.h"
#include "ltable.h"
#include "lfunc.h"
#include "lstring.h"
#include "lgc.h"
#include "lmem.h"
#include "lbytecode.h"
#include "lapi.h"
#include <string.h>
// TODO: RAII deallocation doesn't work for longjmp builds if a memory error happens
template<typename T>
struct TempBuffer
{
lua_State* L;
T* data;
size_t count;
TempBuffer(lua_State* L, size_t count)
: L(L)
, data(luaM_newarray(L, count, T, 0))
, count(count)
{
}
~TempBuffer()
{
luaM_freearray(L, data, count, T, 0);
}
T& operator[](size_t index)
{
LUAU_ASSERT(index < count);
return data[index];
}
};
void luaV_getimport(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil)
{
int count = id >> 30;
LUAU_ASSERT(count > 0);
int id0 = int(id >> 20) & 1023;
int id1 = int(id >> 10) & 1023;
int id2 = int(id) & 1023;
// after the first call to luaV_gettable, res may be invalid, and env may (sometimes) be garbage collected
// we take care to not use env again and to restore res before every consecutive use
ptrdiff_t resp = savestack(L, res);
// global lookup for id0
TValue g;
sethvalue(L, &g, env);
luaV_gettable(L, &g, &k[id0], res);
// table lookup for id1
if (count < 2)
return;
res = restorestack(L, resp);
if (!propagatenil || !ttisnil(res))
luaV_gettable(L, res, &k[id1], res);
// table lookup for id2
if (count < 3)
return;
res = restorestack(L, resp);
if (!propagatenil || !ttisnil(res))
luaV_gettable(L, res, &k[id2], res);
}
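// Encoding illustration (an added note): an import id packs the path length into the top two
// bits and up to three 10-bit constant indices below them. For example,
// id = (2u << 30) | (3 << 20) | (7 << 10) means "look up k[3] as a global, then index the
// result with k[7]"; id2 is ignored because count == 2.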
template<typename T>
static T read(const char* data, size_t size, size_t& offset)
{
T result;
memcpy(&result, data + offset, sizeof(T));
offset += sizeof(T);
return result;
}
static unsigned int readVarInt(const char* data, size_t size, size_t& offset)
{
unsigned int result = 0;
unsigned int shift = 0;
uint8_t byte;
do
{
byte = read<uint8_t>(data, size, offset);
result |= (byte & 127) << shift;
shift += 7;
} while (byte & 128);
return result;
}
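// Worked example (an added illustration): the byte sequence E5 8E 26 decodes to
// 0x65 | (0x0E << 7) | (0x26 << 14) = 0x98765; the high bit of each byte marks continuation.
// A hypothetical inverse, sketched for illustration only (the encoder lives in the compiler,
// not in this file):
static size_t writeVarInt(char* out, unsigned int value)
{
    size_t n = 0;
    do
    {
        // low 7 bits, with the high bit set while more bytes follow
        out[n++] = char((value & 127) | (value > 127 ? 128 : 0));
        value >>= 7;
    } while (value > 0);
    return n;
}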
static TString* readString(TempBuffer<TString*>& strings, const char* data, size_t size, size_t& offset)
{
unsigned int id = readVarInt(data, size, offset);
return id == 0 ? NULL : strings[id - 1];
}
static void resolveImportSafe(lua_State* L, Table* env, TValue* k, uint32_t id)
{
struct ResolveImport
{
TValue* k;
uint32_t id;
static void run(lua_State* L, void* ud)
{
ResolveImport* self = static_cast<ResolveImport*>(ud);
// note: we call getimport with nil propagation, which means that accesses to table chains like A.B.C will resolve to nil
// this is technically not necessary, but it reduces the number of exceptions when loading scripts that rely on getfenv/setfenv for global
// injection
// allocate a stack slot so that we can do table lookups
luaD_checkstack(L, 1);
setnilvalue(L->top);
L->top++;
luaV_getimport(L, L->gt, self->k, L->top - 1, self->id, /* propagatenil= */ true);
}
};
ResolveImport ri = {k, id};
if (L->gt->safeenv)
{
// luaD_pcall will make sure that if any C/Lua calls during import resolution fail, the thread state is restored back
int oldTop = lua_gettop(L);
int status = luaD_pcall(L, &ResolveImport::run, &ri, savestack(L, L->top), 0);
LUAU_ASSERT(oldTop + 1 == lua_gettop(L)); // if an error occurred, luaD_pcall saves it on stack
if (status != 0)
{
// replace error object with nil
setnilvalue(L->top - 1);
}
}
else
{
setnilvalue(L->top);
L->top++;
}
}
int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size, int env)
{
size_t offset = 0;
uint8_t version = read<uint8_t>(data, size, offset);
// 0 means the rest of the bytecode is the error message
if (version == 0)
{
char chunkbuf[LUA_IDSIZE];
const char* chunkid = luaO_chunkid(chunkbuf, sizeof(chunkbuf), chunkname, strlen(chunkname));
lua_pushfstring(L, "%s%.*s", chunkid, int(size - offset), data + offset);
return 1;
}
if (version < LBC_VERSION_MIN || version > LBC_VERSION_MAX)
{
char chunkbuf[LUA_IDSIZE];
const char* chunkid = luaO_chunkid(chunkbuf, sizeof(chunkbuf), chunkname, strlen(chunkname));
lua_pushfstring(L, "%s: bytecode version mismatch (expected [%d..%d], got %d)", chunkid, LBC_VERSION_MIN, LBC_VERSION_MAX, version);
return 1;
}
// pause GC for the duration of deserialization - some objects we're creating aren't rooted
// TODO: if an allocation error happens mid-load, we do not unpause GC!
size_t GCthreshold = L->global->GCthreshold;
L->global->GCthreshold = SIZE_MAX;
// env is 0 for current environment and a stack index otherwise
Table* envt = (env == 0) ? L->gt : hvalue(luaA_toobject(L, env));
TString* source = luaS_new(L, chunkname);
if (version >= 4)
{
uint8_t typesversion = read<uint8_t>(data, size, offset);
LUAU_ASSERT(typesversion == 1);
}
// string table
unsigned int stringCount = readVarInt(data, size, offset);
TempBuffer<TString*> strings(L, stringCount);
for (unsigned int i = 0; i < stringCount; ++i)
{
unsigned int length = readVarInt(data, size, offset);
strings[i] = luaS_newlstr(L, data + offset, length);
offset += length;
}
// proto table
unsigned int protoCount = readVarInt(data, size, offset);
TempBuffer<Proto*> protos(L, protoCount);
for (unsigned int i = 0; i < protoCount; ++i)
{
Proto* p = luaF_newproto(L);
p->source = source;
p->bytecodeid = int(i);
p->maxstacksize = read<uint8_t>(data, size, offset);
p->numparams = read<uint8_t>(data, size, offset);
p->nups = read<uint8_t>(data, size, offset);
p->is_vararg = read<uint8_t>(data, size, offset);
if (version >= 4)
{
uint8_t cgflags = read<uint8_t>(data, size, offset);
LUAU_ASSERT(cgflags == 0);
uint32_t typesize = readVarInt(data, size, offset);
if (typesize)
{
uint8_t* types = (uint8_t*)data + offset;
LUAU_ASSERT(typesize == unsigned(2 + p->numparams));
LUAU_ASSERT(types[0] == LBC_TYPE_FUNCTION);
LUAU_ASSERT(types[1] == p->numparams);
offset += typesize;
}
}
p->sizecode = readVarInt(data, size, offset);
p->code = luaM_newarray(L, p->sizecode, Instruction, p->memcat);
for (int j = 0; j < p->sizecode; ++j)
p->code[j] = read<uint32_t>(data, size, offset);
p->codeentry = p->code;
p->sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, p->sizek, TValue, p->memcat);
#ifdef HARDMEMTESTS
// this is redundant during normal runs, but resolveImportSafe can trigger GC checks under HARDMEMTESTS
// because p->k isn't fully formed at this point, we pre-fill it with nil to make subsequent setup safe
for (int j = 0; j < p->sizek; ++j)
{
setnilvalue(&p->k[j]);
}
#endif
for (int j = 0; j < p->sizek; ++j)
{
switch (read<uint8_t>(data, size, offset))
{
case LBC_CONSTANT_NIL:
setnilvalue(&p->k[j]);
break;
case LBC_CONSTANT_BOOLEAN:
{
uint8_t v = read<uint8_t>(data, size, offset);
setbvalue(&p->k[j], v);
break;
}
case LBC_CONSTANT_NUMBER:
{
double v = read<double>(data, size, offset);
setnvalue(&p->k[j], v);
break;
}
case LBC_CONSTANT_STRING:
{
TString* v = readString(strings, data, size, offset);
setsvalue(L, &p->k[j], v);
break;
}
case LBC_CONSTANT_IMPORT:
{
uint32_t iid = read<uint32_t>(data, size, offset);
resolveImportSafe(L, envt, p->k, iid);
setobj(L, &p->k[j], L->top - 1);
L->top--;
break;
}
case LBC_CONSTANT_TABLE:
{
int keys = readVarInt(data, size, offset);
Table* h = luaH_new(L, 0, keys);
for (int i = 0; i < keys; ++i)
{
int key = readVarInt(data, size, offset);
TValue* val = luaH_set(L, h, &p->k[key]);
setnvalue(val, 0.0);
}
sethvalue(L, &p->k[j], h);
break;
}
case LBC_CONSTANT_CLOSURE:
{
uint32_t fid = readVarInt(data, size, offset);
Closure* cl = luaF_newLclosure(L, protos[fid]->nups, envt, protos[fid]);
cl->preload = (cl->nupvalues > 0);
setclvalue(L, &p->k[j], cl);
break;
}
default:
LUAU_ASSERT(!"Unexpected constant kind");
}
}
p->sizep = readVarInt(data, size, offset);
p->p = luaM_newarray(L, p->sizep, Proto*, p->memcat);
for (int j = 0; j < p->sizep; ++j)
{
uint32_t fid = readVarInt(data, size, offset);
p->p[j] = protos[fid];
}
p->linedefined = readVarInt(data, size, offset);
p->debugname = readString(strings, data, size, offset);
uint8_t lineinfo = read<uint8_t>(data, size, offset);
if (lineinfo)
{
p->linegaplog2 = read<uint8_t>(data, size, offset);
int intervals = ((p->sizecode - 1) >> p->linegaplog2) + 1;
int absoffset = (p->sizecode + 3) & ~3;
p->sizelineinfo = absoffset + intervals * sizeof(int);
p->lineinfo = luaM_newarray(L, p->sizelineinfo, uint8_t, p->memcat);
p->abslineinfo = (int*)(p->lineinfo + absoffset);
uint8_t lastoffset = 0;
for (int j = 0; j < p->sizecode; ++j)
{
lastoffset += read<uint8_t>(data, size, offset);
p->lineinfo[j] = lastoffset;
}
int lastline = 0;
for (int j = 0; j < intervals; ++j)
{
lastline += read<int32_t>(data, size, offset);
p->abslineinfo[j] = lastline;
}
}
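// worked example of the layout above: with sizecode = 100 and linegaplog2 = 2,
// intervals = ((100 - 1) >> 2) + 1 = 25 and absoffset = (100 + 3) & ~3 = 100,
// so sizelineinfo = 100 + 25 * sizeof(int) = 200 bytes: one uint8_t delta per
// instruction followed by 25 absolute baselines. The line of instruction pc is
// then recovered as abslineinfo[pc >> linegaplog2] + lineinfo[pc]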
uint8_t debuginfo = read<uint8_t>(data, size, offset);
if (debuginfo)
{
p->sizelocvars = readVarInt(data, size, offset);
p->locvars = luaM_newarray(L, p->sizelocvars, LocVar, p->memcat);
for (int j = 0; j < p->sizelocvars; ++j)
{
p->locvars[j].varname = readString(strings, data, size, offset);
p->locvars[j].startpc = readVarInt(data, size, offset);
p->locvars[j].endpc = readVarInt(data, size, offset);
p->locvars[j].reg = read<uint8_t>(data, size, offset);
}
p->sizeupvalues = readVarInt(data, size, offset);
p->upvalues = luaM_newarray(L, p->sizeupvalues, TString*, p->memcat);
for (int j = 0; j < p->sizeupvalues; ++j)
{
p->upvalues[j] = readString(strings, data, size, offset);
}
}
protos[i] = p;
}
// "main" proto is pushed to Lua stack
uint32_t mainid = readVarInt(data, size, offset);
Proto* main = protos[mainid];
luaC_threadbarrier(L);
Closure* cl = luaF_newLclosure(L, 0, envt, main);
setclvalue(L, L->top, cl);
incr_top(L);
L->global->GCthreshold = GCthreshold;
return 0;
}
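// a hedged usage sketch for the loader above (luau_load in upstream Luau),
// assuming the standard luacode.h compiler entry point; the helper name is
// illustrative:
//
// #include "lua.h"
// #include "luacode.h"
// #include <stdlib.h>
// #include <string.h>
//
// static int loadSource(lua_State* L, const char* chunkname, const char* source)
// {
//     size_t bytecodeSize = 0;
//     char* bytecode = luau_compile(source, strlen(source), /* options */ nullptr, &bytecodeSize);
//     int result = luau_load(L, chunkname, bytecode, bytecodeSize, /* env */ 0);
//     free(bytecode);
//     return result; // 0 on success; on failure an error string is pushed and 1 is returned
// }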

View File

@ -1,599 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
#include "lvm.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "lgc.h"
#include "ldo.h"
#include "lnumutils.h"
#include <string.h>
// limit for table tag-method chains (to avoid loops)
#define MAXTAGLOOP 100
const TValue* luaV_tonumber(const TValue* obj, TValue* n)
{
double num;
if (ttisnumber(obj))
return obj;
if (ttisstring(obj) && luaO_str2d(svalue(obj), &num))
{
setnvalue(n, num);
return n;
}
else
return NULL;
}
int luaV_tostring(lua_State* L, StkId obj)
{
if (!ttisnumber(obj))
return 0;
else
{
char s[LUAI_MAXNUM2STR];
double n = nvalue(obj);
char* e = luai_num2str(s, n);
LUAU_ASSERT(e < s + sizeof(s));
setsvalue(L, obj, luaS_newlstr(L, s, e - s));
return 1;
}
}
const float* luaV_tovector(const TValue* obj)
{
if (ttisvector(obj))
return obj->value.v;
return nullptr;
}
static StkId callTMres(lua_State* L, StkId res, const TValue* f, const TValue* p1, const TValue* p2)
{
ptrdiff_t result = savestack(L, res);
// using stack room beyond top is technically safe here, but for very complicated reasons:
// * The stack guarantees EXTRA_STACK room beyond stack_last (see luaD_reallocstack) will be allocated
// * we cannot move luaD_checkstack above because the arguments are *sometimes* pointers to the lua
// stack and checkstack may invalidate those pointers
// * we cannot use savestack/restorestack because the arguments are sometimes on the C++ stack
// * during stack reallocation all of the allocated stack is copied (even beyond stack_last) so these
// values will be preserved even if they go past stack_last
LUAU_ASSERT((L->top + 3) < (L->stack + L->stacksize));
setobj2s(L, L->top, f); // push function
setobj2s(L, L->top + 1, p1); // 1st argument
setobj2s(L, L->top + 2, p2); // 2nd argument
luaD_checkstack(L, 3);
L->top += 3;
luaD_call(L, L->top - 3, 1);
res = restorestack(L, result);
L->top--;
setobj2s(L, res, L->top);
return res;
}
static void callTM(lua_State* L, const TValue* f, const TValue* p1, const TValue* p2, const TValue* p3)
{
// using stack room beyond top is technically safe here, but for very complicated reasons:
// * The stack guarantees EXTRA_STACK room beyond stack_last (see luaD_reallocstack) will be allocated
// * we cannot move luaD_checkstack above because the arguments are *sometimes* pointers to the lua
// stack and checkstack may invalidate those pointers
// * we cannot use savestack/restorestack because the arguments are sometimes on the C++ stack
// * during stack reallocation all of the allocated stack is copied (even beyond stack_last) so these
// values will be preserved even if they go past stack_last
LUAU_ASSERT((L->top + 4) < (L->stack + L->stacksize));
setobj2s(L, L->top, f); // push function
setobj2s(L, L->top + 1, p1); // 1st argument
setobj2s(L, L->top + 2, p2); // 2nd argument
setobj2s(L, L->top + 3, p3); // 3rd argument
luaD_checkstack(L, 4);
L->top += 4;
luaD_call(L, L->top - 4, 0);
}
void luaV_gettable(lua_State* L, const TValue* t, TValue* key, StkId val)
{
int loop;
for (loop = 0; loop < MAXTAGLOOP; loop++)
{
const TValue* tm;
if (ttistable(t))
{ // `t' is a table?
Table* h = hvalue(t);
const TValue* res = luaH_get(h, key); // do a primitive get
if (res != luaO_nilobject)
L->cachedslot = gval2slot(h, res); // remember slot to accelerate future lookups
if (!ttisnil(res) // result is not nil?
|| (tm = fasttm(L, h->metatable, TM_INDEX)) == NULL)
{ // or no TM?
setobj2s(L, val, res);
return;
}
// result was nil and the table has an __index metamethod, so fall through to invoke it
}
else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_INDEX)))
luaG_indexerror(L, t, key);
if (ttisfunction(tm))
{
callTMres(L, val, tm, t, key);
return;
}
t = tm; // else repeat with `tm'
}
luaG_runerror(L, "'__index' chain too long; possible loop");
}
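// in Lua terms: given local t = setmetatable({}, { __index = base }), reading
// t.k falls back to base.k; a chain of __index tables longer than MAXTAGLOOP
// (100) links raises the error above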
void luaV_settable(lua_State* L, const TValue* t, TValue* key, StkId val)
{
int loop;
TValue temp;
for (loop = 0; loop < MAXTAGLOOP; loop++)
{
const TValue* tm;
if (ttistable(t))
{ // `t' is a table?
Table* h = hvalue(t);
const TValue* oldval = luaH_get(h, key);
// should we assign the key? (if key is valid or __newindex is not set)
if (!ttisnil(oldval) || (tm = fasttm(L, h->metatable, TM_NEWINDEX)) == NULL)
{
if (h->readonly)
luaG_readonlyerror(L);
// luaH_set would work but would repeat the lookup, so we use luaH_setslot, which can reuse oldval when it's safe
TValue* newval = luaH_setslot(L, h, oldval, key);
L->cachedslot = gval2slot(h, newval); // remember slot to accelerate future lookups
setobj2t(L, newval, val);
luaC_barriert(L, h, val);
return;
}
// fallthrough to metamethod
}
else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_NEWINDEX)))
luaG_indexerror(L, t, key);
if (ttisfunction(tm))
{
callTM(L, tm, t, key, val);
return;
}
// else repeat with `tm'
setobj(L, &temp, tm); // avoid pointing inside table (may rehash)
t = &temp;
}
luaG_runerror(L, "'__newindex' chain too long; possible loop");
}
static int call_binTM(lua_State* L, const TValue* p1, const TValue* p2, StkId res, TMS event)
{
const TValue* tm = luaT_gettmbyobj(L, p1, event); // try first operand
if (ttisnil(tm))
tm = luaT_gettmbyobj(L, p2, event); // try second operand
if (ttisnil(tm))
return 0;
callTMres(L, res, tm, p1, p2);
return 1;
}
static const TValue* get_compTM(lua_State* L, Table* mt1, Table* mt2, TMS event)
{
const TValue* tm1 = fasttm(L, mt1, event);
const TValue* tm2;
if (tm1 == NULL)
return NULL; // no metamethod
if (mt1 == mt2)
return tm1; // same metatables => same metamethods
tm2 = fasttm(L, mt2, event);
if (tm2 == NULL)
return NULL; // no metamethod
if (luaO_rawequalObj(tm1, tm2)) // same metamethods?
return tm1;
return NULL;
}
static int call_orderTM(lua_State* L, const TValue* p1, const TValue* p2, TMS event, bool error = false)
{
const TValue* tm1 = luaT_gettmbyobj(L, p1, event);
const TValue* tm2;
if (ttisnil(tm1))
{
if (error)
luaG_ordererror(L, p1, p2, event);
return -1; // no metamethod?
}
tm2 = luaT_gettmbyobj(L, p2, event);
if (!luaO_rawequalObj(tm1, tm2)) // different metamethods?
{
if (error)
luaG_ordererror(L, p1, p2, event);
return -1;
}
callTMres(L, L->top, tm1, p1, p2);
return !l_isfalse(L->top);
}
int luaV_strcmp(const TString* ls, const TString* rs)
{
if (ls == rs)
return 0;
const char* l = getstr(ls);
const char* r = getstr(rs);
// always safe to read one character because even empty strings are nul terminated
if (*l != *r)
return uint8_t(*l) - uint8_t(*r);
size_t ll = ls->len;
size_t lr = rs->len;
size_t lmin = ll < lr ? ll : lr;
int res = memcmp(l, r, lmin);
if (res != 0)
return res;
return ll == lr ? 0 : ll < lr ? -1 : 1;
}
int luaV_lessthan(lua_State* L, const TValue* l, const TValue* r)
{
if (LUAU_UNLIKELY(ttype(l) != ttype(r)))
luaG_ordererror(L, l, r, TM_LT);
else if (LUAU_LIKELY(ttisnumber(l)))
return luai_numlt(nvalue(l), nvalue(r));
else if (ttisstring(l))
return luaV_strcmp(tsvalue(l), tsvalue(r)) < 0;
else
return call_orderTM(L, l, r, TM_LT, /* error= */ true);
}
int luaV_lessequal(lua_State* L, const TValue* l, const TValue* r)
{
int res;
if (ttype(l) != ttype(r))
luaG_ordererror(L, l, r, TM_LE);
else if (ttisnumber(l))
return luai_numle(nvalue(l), nvalue(r));
else if (ttisstring(l))
return luaV_strcmp(tsvalue(l), tsvalue(r)) <= 0;
else if ((res = call_orderTM(L, l, r, TM_LE)) != -1) // first try `le'
return res;
else if ((res = call_orderTM(L, r, l, TM_LT)) == -1) // error if not `lt'
luaG_ordererror(L, l, r, TM_LE);
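// i.e. when a metatable defines __lt but not __le, a <= b is evaluated as not (b < a)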
return !res;
}
int luaV_equalval(lua_State* L, const TValue* t1, const TValue* t2)
{
const TValue* tm;
LUAU_ASSERT(ttype(t1) == ttype(t2));
switch (ttype(t1))
{
case LUA_TNIL:
return 1;
case LUA_TNUMBER:
return luai_numeq(nvalue(t1), nvalue(t2));
case LUA_TVECTOR:
return luai_veceq(vvalue(t1), vvalue(t2));
case LUA_TBOOLEAN:
return bvalue(t1) == bvalue(t2); // true must be 1 !!
case LUA_TLIGHTUSERDATA:
return pvalue(t1) == pvalue(t2);
case LUA_TUSERDATA:
{
tm = get_compTM(L, uvalue(t1)->metatable, uvalue(t2)->metatable, TM_EQ);
if (!tm)
return uvalue(t1) == uvalue(t2);
break; // will try TM
}
case LUA_TTABLE:
{
tm = get_compTM(L, hvalue(t1)->metatable, hvalue(t2)->metatable, TM_EQ);
if (!tm)
return hvalue(t1) == hvalue(t2);
break; // will try TM
}
default:
return gcvalue(t1) == gcvalue(t2);
}
callTMres(L, L->top, tm, t1, t2); // call TM
return !l_isfalse(L->top);
}
void luaV_concat(lua_State* L, int total, int last)
{
do
{
StkId top = L->base + last + 1;
int n = 2; // number of elements handled in this pass (at least 2)
if (!(ttisstring(top - 2) || ttisnumber(top - 2)) || !tostring(L, top - 1))
{
if (!call_binTM(L, top - 2, top - 1, top - 2, TM_CONCAT))
luaG_concaterror(L, top - 2, top - 1);
}
else if (tsvalue(top - 1)->len == 0) // second op is empty?
(void)tostring(L, top - 2); // result is first op (as string)
else
{
// at least two string values; get as many as possible
size_t tl = tsvalue(top - 1)->len;
char* buffer;
int i;
// collect total length
for (n = 1; n < total && tostring(L, top - n - 1); n++)
{
size_t l = tsvalue(top - n - 1)->len;
if (l > MAXSSIZE - tl)
luaG_runerror(L, "string length overflow");
tl += l;
}
char buf[LUA_BUFFERSIZE];
TString* ts = nullptr;
if (tl < LUA_BUFFERSIZE)
{
buffer = buf;
}
else
{
ts = luaS_bufstart(L, tl);
buffer = ts->data;
}
tl = 0;
for (i = n; i > 0; i--)
{ // concat all strings
size_t l = tsvalue(top - i)->len;
memcpy(buffer + tl, svalue(top - i), l);
tl += l;
}
if (tl < LUA_BUFFERSIZE)
{
setsvalue(L, top - n, luaS_newlstr(L, buffer, tl));
}
else
{
setsvalue(L, top - n, luaS_buffinish(L, ts));
}
}
total -= n - 1; // got `n' strings to create 1 new
last -= n - 1;
} while (total > 1); // repeat until only 1 result left
}
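// e.g. for a .. b .. c with all-string operands a single pass collects n = 3
// values and builds one result; a __concat metamethod anywhere in the chain
// instead reduces just two operands per pass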
void luaV_doarith(lua_State* L, StkId ra, const TValue* rb, const TValue* rc, TMS op)
{
TValue tempb, tempc;
const TValue *b, *c;
if ((b = luaV_tonumber(rb, &tempb)) != NULL && (c = luaV_tonumber(rc, &tempc)) != NULL)
{
double nb = nvalue(b), nc = nvalue(c);
switch (op)
{
case TM_ADD:
setnvalue(ra, luai_numadd(nb, nc));
break;
case TM_SUB:
setnvalue(ra, luai_numsub(nb, nc));
break;
case TM_MUL:
setnvalue(ra, luai_nummul(nb, nc));
break;
case TM_DIV:
setnvalue(ra, luai_numdiv(nb, nc));
break;
case TM_MOD:
setnvalue(ra, luai_nummod(nb, nc));
break;
case TM_POW:
setnvalue(ra, luai_numpow(nb, nc));
break;
case TM_UNM:
setnvalue(ra, luai_numunm(nb));
break;
default:
LUAU_ASSERT(0);
break;
}
}
else
{
// vector operations that we support: v + v, v - v, v * v, s * v, v * s, v / v, s / v, v / s, -v
const float* vb = luaV_tovector(rb);
const float* vc = luaV_tovector(rc);
if (vb && vc)
{
switch (op)
{
case TM_ADD:
setvvalue(ra, vb[0] + vc[0], vb[1] + vc[1], vb[2] + vc[2], vb[3] + vc[3]);
return;
case TM_SUB:
setvvalue(ra, vb[0] - vc[0], vb[1] - vc[1], vb[2] - vc[2], vb[3] - vc[3]);
return;
case TM_MUL:
setvvalue(ra, vb[0] * vc[0], vb[1] * vc[1], vb[2] * vc[2], vb[3] * vc[3]);
return;
case TM_DIV:
setvvalue(ra, vb[0] / vc[0], vb[1] / vc[1], vb[2] / vc[2], vb[3] / vc[3]);
return;
case TM_UNM:
setvvalue(ra, -vb[0], -vb[1], -vb[2], -vb[3]);
return;
default:
break;
}
}
else if (vb)
{
c = luaV_tonumber(rc, &tempc);
if (c)
{
float nc = cast_to(float, nvalue(c));
switch (op)
{
case TM_MUL:
setvvalue(ra, vb[0] * nc, vb[1] * nc, vb[2] * nc, vb[3] * nc);
return;
case TM_DIV:
setvvalue(ra, vb[0] / nc, vb[1] / nc, vb[2] / nc, vb[3] / nc);
return;
default:
break;
}
}
}
else if (vc)
{
b = luaV_tonumber(rb, &tempb);
if (b)
{
float nb = cast_to(float, nvalue(b));
switch (op)
{
case TM_MUL:
setvvalue(ra, nb * vc[0], nb * vc[1], nb * vc[2], nb * vc[3]);
return;
case TM_DIV:
setvvalue(ra, nb / vc[0], nb / vc[1], nb / vc[2], nb / vc[3]);
return;
default:
break;
}
}
}
if (!call_binTM(L, rb, rc, ra, op))
{
luaG_aritherror(L, rb, rc, op);
}
}
}
void luaV_dolen(lua_State* L, StkId ra, const TValue* rb)
{
const TValue* tm = NULL;
switch (ttype(rb))
{
case LUA_TTABLE:
{
Table* h = hvalue(rb);
if ((tm = fasttm(L, h->metatable, TM_LEN)) == NULL)
{
setnvalue(ra, cast_num(luaH_getn(h)));
return;
}
break;
}
case LUA_TSTRING:
{
TString* ts = tsvalue(rb);
setnvalue(ra, cast_num(ts->len));
return;
}
default:
tm = luaT_gettmbyobj(L, rb, TM_LEN);
}
if (ttisnil(tm))
luaG_typeerror(L, rb, "get length of");
StkId res = callTMres(L, ra, tm, rb, luaO_nilobject);
if (!ttisnumber(res))
luaG_runerror(L, "'__len' must return a number"); // note, we can't access rb since stack may have been reallocated
}
LUAU_NOINLINE void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit)
{
if (!ttisnumber(pinit) && !luaV_tonumber(pinit, pinit))
luaG_forerror(L, pinit, "initial value");
if (!ttisnumber(plimit) && !luaV_tonumber(plimit, plimit))
luaG_forerror(L, plimit, "limit");
if (!ttisnumber(pstep) && !luaV_tonumber(pstep, pstep))
luaG_forerror(L, pstep, "step");
}
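// as a consequence, loop bounds that coerce via luaV_tonumber are accepted
// (e.g. for i = "1", "3" do ... end), while anything else raises a 'for'
// error naming the offending slot (initial value, limit, or step)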
// calls a C function f with no yielding support; optionally saves one resulting value to the res register
// the function and arguments have to already be pushed to L->top
LUAU_NOINLINE void luaV_callTM(lua_State* L, int nparams, int res)
{
++L->nCcalls;
if (L->nCcalls >= LUAI_MAXCCALLS)
luaD_checkCstack(L);
luaD_checkstack(L, LUA_MINSTACK);
StkId top = L->top;
StkId fun = top - nparams - 1;
CallInfo* ci = incr_ci(L);
ci->func = fun;
ci->base = fun + 1;
ci->top = top + LUA_MINSTACK;
ci->savedpc = NULL;
ci->flags = 0;
ci->nresults = (res >= 0);
LUAU_ASSERT(ci->top <= L->stack_last);
LUAU_ASSERT(ttisfunction(ci->func));
LUAU_ASSERT(clvalue(ci->func)->isC);
L->base = fun + 1;
LUAU_ASSERT(L->top == L->base + nparams);
lua_CFunction func = clvalue(fun)->c.f;
int n = func(L);
LUAU_ASSERT(n >= 0); // yields should have been blocked by nCcalls
// ci is our callinfo, cip is our parent
// note that we read L->ci again since it may have been reallocated by the call
CallInfo* cip = L->ci - 1;
// copy return value into parent stack
if (res >= 0)
{
if (n > 0)
{
setobj2s(L, &cip->base[res], L->top - n);
}
else
{
setnilvalue(&cip->base[res]);
}
}
L->ci = cip;
L->base = cip->base;
L->top = cip->top;
--L->nCcalls;
}
LUAU_NOINLINE void luaV_tryfuncTM(lua_State* L, StkId func)
{
const TValue* tm = luaT_gettmbyobj(L, func, TM_CALL);
if (!ttisfunction(tm))
luaG_typeerror(L, func, "call");
for (StkId p = L->top; p > func; p--) // open space for metamethod
setobj2s(L, p, p - 1);
L->top++; // stack space pre-allocated by the caller
setobj2s(L, func, tm); // tag method is the new function to be called
}
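// e.g. calling a table t(a, b) whose metatable defines __call = f shifts the
// frame [t, a, b] into [f, t, a, b], so f receives t as its first argument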

View File

@ -1,6 +1,7 @@
use std::env;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
pub struct Build {
out_dir: Option<PathBuf>,
@ -14,6 +15,7 @@ pub struct Build {
enable_codegen: bool,
// Vector size, must be 3 (default) or 4
vector_size: usize,
luau_src: String,
}
pub struct Artifacts {
@ -34,6 +36,7 @@ impl Build {
use_longjmp: false,
enable_codegen: false,
vector_size: 3,
luau_src: "https://git.pfaff.dev/michael/luau.git".into(),
}
}
@ -73,6 +76,11 @@ impl Build {
self
}
pub fn luau_src(&mut self, git_url: impl Into<String>) -> &mut Build {
self.luau_src = git_url.into();
self
}
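// a hedged usage sketch from a consumer crate's build.rs (the url here is
// illustrative, not a real endpoint):
//
// fn main() {
//     let _artifacts = Build::new()
//         .luau_src("https://git.example.com/fork/luau.git")
//         .build();
// }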
pub fn build(&mut self) -> Artifacts {
let target = &self.target.as_ref().expect("TARGET not set")[..];
let host = &self.host.as_ref().expect("HOST not set")[..];
@ -80,19 +88,36 @@ impl Build {
let lib_dir = out_dir.join("lib");
let include_dir = out_dir.join("include");
let source_dir_base = Path::new(env!("CARGO_MANIFEST_DIR"));
let common_include_dir = source_dir_base.join("luau").join("Common").join("include");
let ast_source_dir = source_dir_base.join("luau").join("Ast").join("src");
let ast_include_dir = source_dir_base.join("luau").join("Ast").join("include");
let codegen_source_dir = source_dir_base.join("luau").join("CodeGen").join("src");
let codegen_include_dir = source_dir_base.join("luau").join("CodeGen").join("include");
let compiler_source_dir = source_dir_base.join("luau").join("Compiler").join("src");
let source_dir_base = out_dir.join("luau");
if !out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
// Clone the sources into a fixed "luau" directory so the paths below do not
// depend on the repository name in the configured url
if source_dir_base.exists() {
fs::remove_dir_all(&source_dir_base).unwrap();
}
let status = Command::new("git")
.args(["clone", "--depth=1", &self.luau_src, "luau"])
.current_dir(out_dir)
.status()
.unwrap();
if !status.success() {
panic!("Failed to clone sources");
}
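// note that --depth=1 clones only the tip of the remote's default branch, so
// builds track whatever that branch currently points at rather than a pinned
// revision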
let common_include_dir = source_dir_base.join("Common").join("include");
let ast_source_dir = source_dir_base.join("Ast").join("src");
let ast_include_dir = source_dir_base.join("Ast").join("include");
let codegen_source_dir = source_dir_base.join("CodeGen").join("src");
let codegen_include_dir = source_dir_base.join("CodeGen").join("include");
let compiler_source_dir = source_dir_base.join("Compiler").join("src");
let compiler_include_dir = source_dir_base.join("Compiler").join("include");
let vm_source_dir = source_dir_base.join("luau").join("VM").join("src");
let vm_include_dir = source_dir_base.join("luau").join("VM").join("include");
let vm_source_dir = source_dir_base.join("VM").join("src");
let vm_include_dir = source_dir_base.join("VM").join("include");
// Cleanup
if lib_dir.exists() {