v0.5.9+luau579

parent c052eabd34
commit e88e1f2a89
@@ -1,6 +1,6 @@
 [package]
 name = "luau0-src"
-version = "0.5.8+luau577"
+version = "0.5.9+luau579"
 authors = ["Aleksandr Orlenko <zxteam@protonmail.com>"]
 edition = "2021"
 repository = "https://github.com/khvzak/luau-src-rs"

@@ -157,6 +157,8 @@ public:
     void fcmpz(RegisterA64 src);
     void fcsel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond);
 
+    void udf();
+
     // Run final checks
     bool finalize();
 
@@ -99,6 +99,7 @@ public:
     void call(OperandX64 op);
 
     void int3();
+    void ud2();
 
     void bsr(RegisterX64 dst, OperandX64 src);
     void bsf(RegisterX64 dst, OperandX64 src);

@@ -38,7 +38,6 @@ struct IrBuilder
 
     IrOp undef();
 
-    IrOp constBool(bool value);
     IrOp constInt(int value);
     IrOp constUint(unsigned value);
     IrOp constDouble(double value);

@@ -283,7 +283,7 @@ enum class IrCmd : uint8_t
     // A: builtin
     // B: Rn (result start)
     // C: Rn (argument start)
-    // D: Rn or Kn or a boolean that's false (optional second argument)
+    // D: Rn or Kn or undef (optional second argument)
     // E: int (argument count)
     // F: int (result count)
     FASTCALL,

@@ -292,7 +292,7 @@ enum class IrCmd : uint8_t
     // A: builtin
     // B: Rn (result start)
     // C: Rn (argument start)
-    // D: Rn or Kn or a boolean that's false (optional second argument)
+    // D: Rn or Kn or undef (optional second argument)
     // E: int (argument count or -1 to use all arguments up to stack top)
     // F: int (result count or -1 to preserve all results and adjust stack top)
     INVOKE_FASTCALL,

@@ -360,39 +360,46 @@ enum class IrCmd : uint8_t
 
     // Guard against tag mismatch
     // A, B: tag
-    // C: block
+    // C: block/undef
     // In final x64 lowering, A can also be Rn
+    // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_TAG,
 
     // Guard against readonly table
     // A: pointer (Table)
-    // B: block
+    // B: block/undef
+    // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_READONLY,
 
     // Guard against table having a metatable
     // A: pointer (Table)
-    // B: block
+    // B: block/undef
+    // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_NO_METATABLE,
 
     // Guard against executing in unsafe environment
-    // A: block
+    // A: block/undef
+    // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_SAFE_ENV,
 
     // Guard against index overflowing the table array size
     // A: pointer (Table)
     // B: int (index)
-    // C: block
+    // C: block/undef
+    // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_ARRAY_SIZE,
 
     // Guard against cached table node slot not matching the actual table node slot for a key
     // A: pointer (LuaNode)
     // B: Kn
-    // C: block
+    // C: block/undef
+    // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_SLOT_MATCH,
 
     // Guard against table node with a linked next node to ensure that our lookup hits the main position of the key
     // A: pointer (LuaNode)
-    // B: block
+    // B: block/undef
+    // When undef is specified instead of a block, execution is aborted on check failure
     CHECK_NODE_NO_NEXT,
 
     // Special operations

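[Note, not part of the commit: every guard above now accepts either a recovery block or undef as its failure target. A minimal standalone model of that contract, with hypothetical names (OpKind, Op, guardCheckTag are illustration only, not the real IR types):

    #include <cstdint>
    #include <cstdlib>

    enum class OpKind : uint8_t { Block, Undef };

    struct Op
    {
        OpKind kind;
        int blockIndex; // meaningful only when kind == OpKind::Block
    };

    // Returns the block to jump to on failure, or -1 when the check passes.
    int guardCheckTag(uint8_t actualTag, uint8_t expectedTag, Op onFail)
    {
        if (actualTag == expectedTag)
            return -1; // check passed, fall through

        if (onFail.kind == OpKind::Undef)
            abort(); // undef target: execution is aborted on check failure

        return onFail.blockIndex; // jump to the recovery block
    }
]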
@@ -428,7 +435,7 @@ enum class IrCmd : uint8_t
 
     // While capture is a no-op right now, it might be useful to track register/upvalue lifetimes
     // A: Rn or UPn
-    // B: boolean (true for reference capture, false for value capture)
+    // B: unsigned int (1 for reference capture, 0 for value capture)
     CAPTURE,
 
     // Operations that don't have an IR representation yet

@@ -581,7 +588,6 @@ enum class IrCmd : uint8_t
 
 enum class IrConstKind : uint8_t
 {
-    Bool,
     Int,
     Uint,
     Double,

@@ -867,27 +873,6 @@ struct IrFunction
         return value.valueTag;
     }
 
-    bool boolOp(IrOp op)
-    {
-        IrConst& value = constOp(op);
-
-        LUAU_ASSERT(value.kind == IrConstKind::Bool);
-        return value.valueBool;
-    }
-
-    std::optional<bool> asBoolOp(IrOp op)
-    {
-        if (op.kind != IrOpKind::Constant)
-            return std::nullopt;
-
-        IrConst& value = constOp(op);
-
-        if (value.kind != IrConstKind::Bool)
-            return std::nullopt;
-
-        return value.valueBool;
-    }
-
     int intOp(IrOp op)
     {
         IrConst& value = constOp(op);

@@ -687,6 +687,11 @@ void AssemblyBuilderA64::fcsel(RegisterA64 dst, RegisterA64 src1, RegisterA64 sr
     placeCS("fcsel", dst, src1, src2, cond, 0b11110'01'1, 0b11);
 }
 
+void AssemblyBuilderA64::udf()
+{
+    place0("udf", 0);
+}
+
 bool AssemblyBuilderA64::finalize()
 {
     code.resize(codePos - code.data());

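[Note, not part of the commit: on A64, UDF #imm16 is the permanently undefined instruction, and with immediate 0 it encodes as the all-zero word, so executing it reliably raises an undefined-instruction trap. Assuming place0 emits the raw 32-bit word (consistent with its use above), the encoding is simply:

    #include <cstdint>

    // Sketch: UDF #imm16 occupies the encoding space where bits [31:16] are
    // zero; udf() uses immediate 0, i.e. the all-zero word 0x00000000.
    uint32_t encodeUdf(uint16_t imm16)
    {
        return uint32_t(imm16);
    }
]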
@@ -472,6 +472,15 @@ void AssemblyBuilderX64::int3()
     commit();
 }
 
+void AssemblyBuilderX64::ud2()
+{
+    if (logText)
+        log("ud2");
+
+    place(0x0f);
+    place(0x0b);
+}
+
 void AssemblyBuilderX64::bsr(RegisterX64 dst, OperandX64 src)
 {
     if (logText)

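[Note, not part of the commit: 0F 0B is the architecturally defined UD2 opcode on x86-64; it raises #UD when executed, which makes it a compact, guaranteed trap for the aborting guards added later in this commit:

    #include <cstdint>

    // Sketch: the exact byte sequence ud2() emits.
    const uint8_t kUd2Bytes[2] = {0x0f, 0x0b};
]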
@@ -1406,7 +1415,7 @@ void AssemblyBuilderX64::commit()
 {
     LUAU_ASSERT(codePos <= codeEnd);
 
-    if (codeEnd - codePos < kMaxInstructionLength)
+    if (unsigned(codeEnd - codePos) < kMaxInstructionLength)
         extend();
 }
 

@@ -51,13 +51,15 @@ static void makePagesExecutable(uint8_t* mem, size_t size)
 
     DWORD oldProtect;
     if (VirtualProtect(mem, size, PAGE_EXECUTE_READ, &oldProtect) == 0)
-        LUAU_ASSERT(!"failed to change page protection");
+        LUAU_ASSERT(!"Failed to change page protection");
 }
 
 static void flushInstructionCache(uint8_t* mem, size_t size)
 {
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
     if (FlushInstructionCache(GetCurrentProcess(), mem, size) == 0)
-        LUAU_ASSERT(!"failed to flush instruction cache");
+        LUAU_ASSERT(!"Failed to flush instruction cache");
+#endif
 }
 #else
 static uint8_t* allocatePages(size_t size)

@@ -68,7 +70,7 @@ static uint8_t* allocatePages(size_t size)
 static void freePages(uint8_t* mem, size_t size)
 {
     if (munmap(mem, alignToPageSize(size)) != 0)
-        LUAU_ASSERT(!"failed to deallocate block memory");
+        LUAU_ASSERT(!"Failed to deallocate block memory");
 }
 
 static void makePagesExecutable(uint8_t* mem, size_t size)

@@ -77,7 +79,7 @@ static void makePagesExecutable(uint8_t* mem, size_t size)
     LUAU_ASSERT(size == alignToPageSize(size));
 
     if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0)
-        LUAU_ASSERT(!"failed to change page protection");
+        LUAU_ASSERT(!"Failed to change page protection");
 }
 
 static void flushInstructionCache(uint8_t* mem, size_t size)

@@ -80,7 +80,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
 #if defined(_WIN32) && defined(_M_X64)
     if (!RtlAddFunctionTable((RUNTIME_FUNCTION*)block, uint32_t(unwind->getFunctionCount()), uintptr_t(block)))
     {
-        LUAU_ASSERT(!"failed to allocate function table");
+        LUAU_ASSERT(!"Failed to allocate function table");
         return nullptr;
     }
 #elif defined(__linux__) || defined(__APPLE__)

@@ -95,7 +95,7 @@ void destroyBlockUnwindInfo(void* context, void* unwindData)
 {
 #if defined(_WIN32) && defined(_M_X64)
     if (!RtlDeleteFunctionTable((RUNTIME_FUNCTION*)unwindData))
-        LUAU_ASSERT(!"failed to deallocate function table");
+        LUAU_ASSERT(!"Failed to deallocate function table");
 #elif defined(__linux__) || defined(__APPLE__)
     visitFdeEntries((char*)unwindData, __deregister_frame);
 #endif

@@ -18,7 +18,6 @@
 #include "Luau/AssemblyBuilderA64.h"
 #include "Luau/AssemblyBuilderX64.h"
 
-#include "CustomExecUtils.h"
 #include "NativeState.h"
 
 #include "CodeGenA64.h"

@@ -59,6 +58,8 @@ namespace Luau
 namespace CodeGen
 {
 
+static const Instruction kCodeEntryInsn = LOP_NATIVECALL;
+
 static void* gPerfLogContext = nullptr;
 static PerfLogFn gPerfLogFn = nullptr;
 
@@ -332,9 +333,15 @@ static std::optional<NativeProto> assembleFunction(AssemblyBuilder& build, Nativ
     return createNativeProto(proto, ir);
 }
 
+static NativeState* getNativeState(lua_State* L)
+{
+    return static_cast<NativeState*>(L->global->ecb.context);
+}
+
 static void onCloseState(lua_State* L)
 {
-    destroyNativeState(L);
+    delete getNativeState(L);
+    L->global->ecb = lua_ExecutionCallbacks();
 }
 
 static void onDestroyFunction(lua_State* L, Proto* proto)

@@ -342,6 +349,7 @@ static void onDestroyFunction(lua_State* L, Proto* proto)
     destroyExecData(proto->execdata);
     proto->execdata = nullptr;
     proto->exectarget = 0;
+    proto->codeentry = proto->code;
 }
 
 static int onEnter(lua_State* L, Proto* proto)

@@ -362,7 +370,7 @@ static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
     if (!proto->execdata)
         return;
 
-    LUAU_ASSERT(!"native breakpoints are not implemented");
+    LUAU_ASSERT(!"Native breakpoints are not implemented");
 }
 
 #if defined(__aarch64__)

@@ -430,39 +438,34 @@ void create(lua_State* L)
 {
     LUAU_ASSERT(isSupported());
 
-    NativeState& data = *createNativeState(L);
+    std::unique_ptr<NativeState> data = std::make_unique<NativeState>();
 
 #if defined(_WIN32)
-    data.unwindBuilder = std::make_unique<UnwindBuilderWin>();
+    data->unwindBuilder = std::make_unique<UnwindBuilderWin>();
 #else
-    data.unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
+    data->unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
 #endif
 
-    data.codeAllocator.context = data.unwindBuilder.get();
-    data.codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
-    data.codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;
+    data->codeAllocator.context = data->unwindBuilder.get();
+    data->codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
+    data->codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;
 
-    initFunctions(data);
+    initFunctions(*data);
 
 #if defined(__x86_64__) || defined(_M_X64)
-    if (!X64::initHeaderFunctions(data))
-    {
-        destroyNativeState(L);
+    if (!X64::initHeaderFunctions(*data))
         return;
-    }
 #elif defined(__aarch64__)
-    if (!A64::initHeaderFunctions(data))
-    {
-        destroyNativeState(L);
+    if (!A64::initHeaderFunctions(*data))
         return;
-    }
 #endif
 
     if (gPerfLogFn)
-        gPerfLogFn(gPerfLogContext, uintptr_t(data.context.gateEntry), 4096, "<luau gate>");
+        gPerfLogFn(gPerfLogContext, uintptr_t(data->context.gateEntry), 4096, "<luau gate>");
 
-    lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
+    lua_ExecutionCallbacks* ecb = &L->global->ecb;
 
+    ecb->context = data.release();
     ecb->close = onCloseState;
     ecb->destroy = onDestroyFunction;
     ecb->enter = onEnter;

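[Note, not part of the commit: the rework above changes ownership. The NativeState is now built into a local unique_ptr and released into the VM's execution-callback context only on full success; early returns destroy the half-initialized state automatically, and onCloseState deletes it later. A standalone sketch of the handoff (State/Callbacks/install are hypothetical stand-ins):

    #include <memory>

    struct State { /* ... */ };
    struct Callbacks { void* context = nullptr; };

    void install(Callbacks& cb, bool initOk)
    {
        std::unique_ptr<State> data = std::make_unique<State>();

        if (!initOk)
            return; // unique_ptr cleans up; no explicit destroy call needed

        cb.context = data.release(); // the VM now owns the state
    }
]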
@@ -490,7 +493,8 @@ void compile(lua_State* L, int idx)
     const TValue* func = luaA_toobject(L, idx);
 
     // If initialization has failed, do not compile any functions
-    if (!getNativeState(L))
+    NativeState* data = getNativeState(L);
+    if (!data)
         return;
 
 #if defined(__aarch64__)

@@ -499,8 +503,6 @@ void compile(lua_State* L, int idx)
     X64::AssemblyBuilderX64 build(/* logText= */ false);
 #endif
 
-    NativeState* data = getNativeState(L);
-
     std::vector<Proto*> protos;
     gatherFunctions(protos, clvalue(func)->l.p);
 
@@ -564,6 +566,7 @@ void compile(lua_State* L, int idx)
         // the memory is now managed by VM and will be freed via onDestroyFunction
         result.p->execdata = result.execdata;
         result.p->exectarget = uintptr_t(codeStart) + result.exectarget;
+        result.p->codeentry = &kCodeEntryInsn;
     }
 }
 

@@ -5,7 +5,6 @@
 #include "Luau/UnwindBuilder.h"
 
 #include "BitUtils.h"
-#include "CustomExecUtils.h"
 #include "NativeState.h"
 #include "EmitCommonA64.h"
 
@@ -95,13 +94,14 @@ static void emitReentry(AssemblyBuilderA64& build, ModuleHelpers& helpers)
     build.ldr(x2, mem(rState, offsetof(lua_State, ci))); // L->ci
 
     // We need to check if the new frame can be executed natively
-    // TOOD: .flags and .savedpc load below can be fused with ldp
+    // TODO: .flags and .savedpc load below can be fused with ldp
     build.ldr(w3, mem(x2, offsetof(CallInfo, flags)));
-    build.tbz(x3, countrz(LUA_CALLINFO_CUSTOM), helpers.exitContinueVm);
+    build.tbz(x3, countrz(LUA_CALLINFO_NATIVE), helpers.exitContinueVm);
 
     build.mov(rClosure, x0);
-    build.ldr(rConstants, mem(x1, offsetof(Proto, k))); // proto->k
-    build.ldr(rCode, mem(x1, offsetof(Proto, code))); // proto->code
+
+    LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
+    build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
 
     // Get instruction index from instruction pointer
     // To get instruction index from instruction pointer, we need to divide byte offset by 4

@@ -145,8 +145,9 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde
     build.mov(rNativeContext, x3);
 
     build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base
-    build.ldr(rConstants, mem(x1, offsetof(Proto, k))); // proto->k
-    build.ldr(rCode, mem(x1, offsetof(Proto, code))); // proto->code
+
+    LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
+    build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
 
     build.ldr(x9, mem(x0, offsetof(lua_State, ci))); // L->ci
     build.ldr(x9, mem(x9, offsetof(CallInfo, func))); // L->ci->func

@@ -194,7 +195,7 @@ bool initHeaderFunctions(NativeState& data)
     if (!data.codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
             int(build.code.size() * sizeof(build.code[0])), data.gateData, data.gateDataSize, codeStart))
     {
-        LUAU_ASSERT(!"failed to create entry function");
+        LUAU_ASSERT(!"Failed to create entry function");
         return false;
     }
 

@@ -1,8 +1,6 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #include "CodeGenUtils.h"
 
-#include "CustomExecUtils.h"
-
 #include "lvm.h"
 
 #include "lbuiltins.h"

@@ -268,7 +266,7 @@ Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults)
     ci->savedpc = p->code;
 
     if (LUAU_LIKELY(p->execdata != NULL))
-        ci->flags = LUA_CALLINFO_CUSTOM;
+        ci->flags = LUA_CALLINFO_NATIVE;
 
     return ccl;
 }

@@ -4,7 +4,6 @@
 #include "Luau/AssemblyBuilderX64.h"
 #include "Luau/UnwindBuilder.h"
 
-#include "CustomExecUtils.h"
 #include "NativeState.h"
 #include "EmitCommonX64.h"
 
@@ -160,7 +159,7 @@ bool initHeaderFunctions(NativeState& data)
     if (!data.codeAllocator.allocate(
             build.data.data(), int(build.data.size()), build.code.data(), int(build.code.size()), data.gateData, data.gateDataSize, codeStart))
     {
-        LUAU_ASSERT(!"failed to create entry function");
+        LUAU_ASSERT(!"Failed to create entry function");
         return false;
     }
 

@@ -1,106 +0,0 @@
-// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
-#pragma once
-
-#include "NativeState.h"
-
-#include "lobject.h"
-#include "lstate.h"
-
-namespace Luau
-{
-namespace CodeGen
-{
-
-// Here we define helper functions to wrap interaction with Luau custom execution API so that it works with or without LUA_CUSTOM_EXECUTION
-
-#if LUA_CUSTOM_EXECUTION
-
-inline lua_ExecutionCallbacks* getExecutionCallbacks(lua_State* L)
-{
-    return &L->global->ecb;
-}
-
-inline NativeState* getNativeState(lua_State* L)
-{
-    lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
-    return (NativeState*)ecb->context;
-}
-
-inline void setNativeState(lua_State* L, NativeState* nativeState)
-{
-    lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);
-    ecb->context = nativeState;
-}
-
-inline NativeState* createNativeState(lua_State* L)
-{
-    NativeState* state = new NativeState();
-    setNativeState(L, state);
-    return state;
-}
-
-inline void destroyNativeState(lua_State* L)
-{
-    NativeState* state = getNativeState(L);
-    setNativeState(L, nullptr);
-    delete state;
-}
-
-#else
-
-inline lua_ExecutionCallbacks* getExecutionCallbacks(lua_State* L)
-{
-    return nullptr;
-}
-
-inline NativeState* getNativeState(lua_State* L)
-{
-    return nullptr;
-}
-
-inline void setNativeState(lua_State* L, NativeState* nativeState) {}
-
-inline NativeState* createNativeState(lua_State* L)
-{
-    return nullptr;
-}
-
-inline void destroyNativeState(lua_State* L) {}
-
-#endif
-
-inline int getOpLength(LuauOpcode op)
-{
-    switch (op)
-    {
-    case LOP_GETGLOBAL:
-    case LOP_SETGLOBAL:
-    case LOP_GETIMPORT:
-    case LOP_GETTABLEKS:
-    case LOP_SETTABLEKS:
-    case LOP_NAMECALL:
-    case LOP_JUMPIFEQ:
-    case LOP_JUMPIFLE:
-    case LOP_JUMPIFLT:
-    case LOP_JUMPIFNOTEQ:
-    case LOP_JUMPIFNOTLE:
-    case LOP_JUMPIFNOTLT:
-    case LOP_NEWTABLE:
-    case LOP_SETLIST:
-    case LOP_FORGLOOP:
-    case LOP_LOADKX:
-    case LOP_FASTCALL2:
-    case LOP_FASTCALL2K:
-    case LOP_JUMPXEQKNIL:
-    case LOP_JUMPXEQKB:
-    case LOP_JUMPXEQKN:
-    case LOP_JUMPXEQKS:
-        return 2;
-
-    default:
-        return 1;
-    }
-}
-
-} // namespace CodeGen
-} // namespace Luau

@@ -119,7 +119,6 @@ void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int r
         return emitBuiltinTypeof(regs, build, ra, arg);
     default:
         LUAU_ASSERT(!"Missing x64 lowering");
-        break;
     }
 }
 
@@ -6,7 +6,6 @@
 #include "Luau/IrData.h"
 #include "Luau/IrRegAllocX64.h"
 
-#include "CustomExecUtils.h"
 #include "NativeState.h"
 
 #include "lgc.h"

@@ -184,33 +184,6 @@ inline void jumpIfTruthy(AssemblyBuilderX64& build, int ri, Label& target, Label
     build.jcc(ConditionX64::NotEqual, target); // true if boolean value is 'true'
 }
 
-inline void jumpIfNodeKeyTagIsNot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, lua_Type tag, Label& label)
-{
-    tmp.size = SizeX64::dword;
-
-    build.mov(tmp, luauNodeKeyTag(node));
-    build.and_(tmp, kTKeyTagMask);
-    build.cmp(tmp, tag);
-    build.jcc(ConditionX64::NotEqual, label);
-}
-
-inline void jumpIfNodeValueTagIs(AssemblyBuilderX64& build, RegisterX64 node, lua_Type tag, Label& label)
-{
-    build.cmp(dword[node + offsetof(LuaNode, val) + offsetof(TValue, tt)], tag);
-    build.jcc(ConditionX64::Equal, label);
-}
-
-inline void jumpIfNodeKeyNotInExpectedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, OperandX64 expectedKey, Label& label)
-{
-    jumpIfNodeKeyTagIsNot(build, tmp, node, LUA_TSTRING, label);
-
-    build.mov(tmp, expectedKey);
-    build.cmp(tmp, luauNodeKeyValue(node));
-    build.jcc(ConditionX64::NotEqual, label);
-
-    jumpIfNodeValueTagIs(build, node, LUA_TNIL, label);
-}
-
 void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, OperandX64 rhs, IrCondition cond, Label& label);
 void jumpOnAnyCmpFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, IrCondition cond, Label& label);
 

@@ -4,8 +4,10 @@
 #include "Luau/AssemblyBuilderX64.h"
 #include "Luau/IrRegAllocX64.h"
 
-#include "CustomExecUtils.h"
 #include "EmitCommonX64.h"
 #include "NativeState.h"
 
+#include "lstate.h"
+
 namespace Luau
 {
@@ -87,8 +89,8 @@ void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int
     build.test(rax, rax);
     build.jcc(ConditionX64::Zero, helpers.continueCallInVm);
 
-    // Mark call frame as custom
-    build.mov(dword[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_CUSTOM);
+    // Mark call frame as native
+    build.mov(dword[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);
 
     // Switch current constants
     build.mov(rConstants, qword[proto + offsetof(Proto, k)]);

@@ -298,7 +300,7 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, i
 
     build.mov(execdata, qword[proto + offsetof(Proto, execdata)]);
 
-    build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_CUSTOM);
+    build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);
     build.jcc(ConditionX64::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data
 
     // Change constants

@@ -113,7 +113,7 @@ uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t s
     }
 
     // There must be a next use since there is the last use location
-    LUAU_ASSERT(!"failed to find next use");
+    LUAU_ASSERT(!"Failed to find next use");
     return targetInst.lastUse;
 }
 
@@ -338,7 +338,7 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
         case IrCmd::CAPTURE:
             maybeUse(inst.a);
 
-            if (function.boolOp(inst.b))
+            if (function.uintOp(inst.b) == 1)
                 capturedRegs.set(vmRegOp(inst.a), true);
             break;
         case IrCmd::SETLIST:

@@ -4,7 +4,6 @@
 #include "Luau/IrAnalysis.h"
 #include "Luau/IrUtils.h"
 
-#include "CustomExecUtils.h"
 #include "IrTranslation.h"
 
 #include "lapi.h"

@@ -19,7 +18,7 @@ namespace CodeGen
 constexpr unsigned kNoAssociatedBlockIndex = ~0u;
 
 IrBuilder::IrBuilder()
-    : constantMap({IrConstKind::Bool, ~0ull})
+    : constantMap({IrConstKind::Tag, ~0ull})
 {
 }
 
@@ -410,8 +409,7 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
         break;
     }
     default:
-        LUAU_ASSERT(!"unknown instruction");
-        break;
+        LUAU_ASSERT(!"Unknown instruction");
     }
 }
 
@@ -449,7 +447,7 @@ void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator)
             if (const uint32_t* newIndex = instRedir.find(op.index))
                 op.index = *newIndex;
             else
-                LUAU_ASSERT(!"values can only be used if they are defined in the same block");
+                LUAU_ASSERT(!"Values can only be used if they are defined in the same block");
         }
     };
 
@@ -501,14 +499,6 @@ IrOp IrBuilder::undef()
     return {IrOpKind::Undef, 0};
 }
 
-IrOp IrBuilder::constBool(bool value)
-{
-    IrConst constant;
-    constant.kind = IrConstKind::Bool;
-    constant.valueBool = value;
-    return constAny(constant, uint64_t(value));
-}
-
 IrOp IrBuilder::constInt(int value)
 {
     IrConst constant;

@@ -390,9 +390,6 @@ void toString(std::string& result, IrConst constant)
 {
     switch (constant.kind)
     {
-    case IrConstKind::Bool:
-        append(result, constant.valueBool ? "true" : "false");
-        break;
     case IrConstKind::Int:
         append(result, "%di", constant.valueInt);
         break;

@@ -96,6 +96,15 @@ static void emitAddOffset(AssemblyBuilderA64& build, RegisterA64 dst, RegisterA6
     }
 }
 
+static void emitAbort(AssemblyBuilderA64& build, Label& abort)
+{
+    Label skip;
+    build.b(skip);
+    build.setLabel(abort);
+    build.udf();
+    build.setLabel(skip);
+}
+
 static void emitFallback(AssemblyBuilderA64& build, int offset, int pcpos)
 {
     // fallback(L, instruction, base, k)

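[Note, not part of the commit: emitAbort materializes the abort target out of line, so the fall-through path jumps over a single udf() and only a failing guard ever reaches the trap. The shape of the emitted code, in pseudo-assembly:

    //       b    skip        ; passing path jumps over the trap
    //   abort:
    //       udf  #0          ; undefined instruction: hard abort
    //   skip:
    //       ...              ; normal code continues
]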
@@ -256,7 +265,11 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         }
         else if (inst.b.kind == IrOpKind::Constant)
         {
-            if (intOp(inst.b) * sizeof(TValue) <= AssemblyBuilderA64::kMaxImmediate)
+            if (intOp(inst.b) == 0)
+            {
+                // no offset required
+            }
+            else if (intOp(inst.b) * sizeof(TValue) <= AssemblyBuilderA64::kMaxImmediate)
             {
                 build.add(inst.regA64, inst.regA64, uint16_t(intOp(inst.b) * sizeof(TValue)));
             }

@@ -562,7 +575,14 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         break;
     }
     case IrCmd::JUMP_EQ_TAG:
-        if (inst.a.kind == IrOpKind::Inst && inst.b.kind == IrOpKind::Constant)
+    {
+        RegisterA64 zr = noreg;
+
+        if (inst.a.kind == IrOpKind::Constant && tagOp(inst.a) == 0)
+            zr = regOp(inst.b);
+        else if (inst.b.kind == IrOpKind::Constant && tagOp(inst.b) == 0)
+            zr = regOp(inst.a);
+        else if (inst.a.kind == IrOpKind::Inst && inst.b.kind == IrOpKind::Constant)
             build.cmp(regOp(inst.a), tagOp(inst.b));
         else if (inst.a.kind == IrOpKind::Inst && inst.b.kind == IrOpKind::Inst)
             build.cmp(regOp(inst.a), regOp(inst.b));

@@ -573,19 +593,33 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
 
         if (isFallthroughBlock(blockOp(inst.d), next))
         {
-            build.b(ConditionA64::Equal, labelOp(inst.c));
+            if (zr != noreg)
+                build.cbz(zr, labelOp(inst.c));
+            else
+                build.b(ConditionA64::Equal, labelOp(inst.c));
             jumpOrFallthrough(blockOp(inst.d), next);
         }
         else
         {
-            build.b(ConditionA64::NotEqual, labelOp(inst.d));
+            if (zr != noreg)
+                build.cbnz(zr, labelOp(inst.d));
+            else
+                build.b(ConditionA64::NotEqual, labelOp(inst.d));
             jumpOrFallthrough(blockOp(inst.c), next);
         }
         break;
+    }
     case IrCmd::JUMP_EQ_INT:
-        LUAU_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate);
-        build.cmp(regOp(inst.a), uint16_t(intOp(inst.b)));
-        build.b(ConditionA64::Equal, labelOp(inst.c));
+        if (intOp(inst.b) == 0)
+        {
+            build.cbz(regOp(inst.a), labelOp(inst.c));
+        }
+        else
+        {
+            LUAU_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate);
+            build.cmp(regOp(inst.a), uint16_t(intOp(inst.b)));
+            build.b(ConditionA64::Equal, labelOp(inst.c));
+        }
         jumpOrFallthrough(blockOp(inst.d), next);
         break;
     case IrCmd::JUMP_LT_INT:

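[Note, not part of the commit: tag value 0 is LUA_TNIL in the Lua/Luau C API, so tag comparisons against it (and integer comparisons against 0) can drop the CMP entirely; A64's CBZ/CBNZ test a register against zero and branch in a single instruction:

    //   before:               after:
    //     cmp  w0, #0           cbz  w0, target
    //     b.eq target
]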
@@ -732,6 +766,9 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         build.ldr(x2, mem(x2, offsetof(global_State, tmname) + intOp(inst.b) * sizeof(TString*)));
         build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaT_gettm)));
         build.blr(x3);
+
+        build.cbz(x0, labelOp(inst.c)); // no tag method
+
         inst.regA64 = regs.takeReg(x0, index);
         break;
     }
@@ -871,7 +908,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
             build.add(x2, rBase, uint16_t(vmRegOp(inst.c) * sizeof(TValue)));
         else if (inst.c.kind == IrOpKind::Constant)
         {
-            TValue n;
+            TValue n = {};
             setnvalue(&n, uintOp(inst.c));
             build.adr(x2, &n, sizeof(n));
         }

@@ -893,7 +930,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
             build.add(x2, rBase, uint16_t(vmRegOp(inst.c) * sizeof(TValue)));
         else if (inst.c.kind == IrOpKind::Constant)
         {
-            TValue n;
+            TValue n = {};
             setnvalue(&n, uintOp(inst.c));
             build.adr(x2, &n, sizeof(n));
         }

@@ -908,25 +945,17 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         break;
     case IrCmd::GET_IMPORT:
         regs.spill(build, index);
-        // luaV_getimport(L, cl->env, k, aux, /* propagatenil= */ false)
+        // luaV_getimport(L, cl->env, k, ra, aux, /* propagatenil= */ false)
         build.mov(x0, rState);
         build.ldr(x1, mem(rClosure, offsetof(Closure, env)));
         build.mov(x2, rConstants);
-        build.mov(w3, uintOp(inst.b));
-        build.mov(w4, 0);
-        build.ldr(x5, mem(rNativeContext, offsetof(NativeContext, luaV_getimport)));
-        build.blr(x5);
+        build.add(x3, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
+        build.mov(w4, uintOp(inst.b));
+        build.mov(w5, 0);
+        build.ldr(x6, mem(rNativeContext, offsetof(NativeContext, luaV_getimport)));
+        build.blr(x6);
 
         emitUpdateBase(build);
-
-        // setobj2s(L, ra, L->top - 1)
-        build.ldr(x0, mem(rState, offsetof(lua_State, top)));
-        build.sub(x0, x0, sizeof(TValue));
-        build.ldr(q0, x0);
-        build.str(q0, mem(rBase, vmRegOp(inst.a) * sizeof(TValue)));
-
-        // L->top--
-        build.str(x0, mem(rState, offsetof(lua_State, top)));
         break;
     case IrCmd::CONCAT:
         regs.spill(build, index);

@@ -1003,62 +1032,99 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         // note: no emitUpdateBase necessary because prepareFORN does not reallocate stack
         break;
     case IrCmd::CHECK_TAG:
-        build.cmp(regOp(inst.a), tagOp(inst.b));
-        build.b(ConditionA64::NotEqual, labelOp(inst.c));
+    {
+        Label abort; // used when guard aborts execution
+        Label& fail = inst.c.kind == IrOpKind::Undef ? abort : labelOp(inst.c);
+
+        if (tagOp(inst.b) == 0)
+        {
+            build.cbnz(regOp(inst.a), fail);
+        }
+        else
+        {
+            build.cmp(regOp(inst.a), tagOp(inst.b));
+            build.b(ConditionA64::NotEqual, fail);
+        }
+
+        if (abort.id)
+            emitAbort(build, abort);
         break;
+    }
     case IrCmd::CHECK_READONLY:
     {
+        Label abort; // used when guard aborts execution
         RegisterA64 temp = regs.allocTemp(KindA64::w);
         build.ldrb(temp, mem(regOp(inst.a), offsetof(Table, readonly)));
-        build.cbnz(temp, labelOp(inst.b));
+        build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
+
+        if (abort.id)
+            emitAbort(build, abort);
         break;
     }
     case IrCmd::CHECK_NO_METATABLE:
     {
+        Label abort; // used when guard aborts execution
         RegisterA64 temp = regs.allocTemp(KindA64::x);
         build.ldr(temp, mem(regOp(inst.a), offsetof(Table, metatable)));
-        build.cbnz(temp, labelOp(inst.b));
+        build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
+
+        if (abort.id)
+            emitAbort(build, abort);
         break;
     }
     case IrCmd::CHECK_SAFE_ENV:
     {
+        Label abort; // used when guard aborts execution
         RegisterA64 temp = regs.allocTemp(KindA64::x);
        RegisterA64 tempw = castReg(KindA64::w, temp);
         build.ldr(temp, mem(rClosure, offsetof(Closure, env)));
         build.ldrb(tempw, mem(temp, offsetof(Table, safeenv)));
-        build.cbz(tempw, labelOp(inst.a));
+        build.cbz(tempw, inst.a.kind == IrOpKind::Undef ? abort : labelOp(inst.a));
+
+        if (abort.id)
+            emitAbort(build, abort);
         break;
     }
     case IrCmd::CHECK_ARRAY_SIZE:
     {
+        Label abort; // used when guard aborts execution
+        Label& fail = inst.c.kind == IrOpKind::Undef ? abort : labelOp(inst.c);
+
         RegisterA64 temp = regs.allocTemp(KindA64::w);
         build.ldr(temp, mem(regOp(inst.a), offsetof(Table, sizearray)));
 
         if (inst.b.kind == IrOpKind::Inst)
+        {
             build.cmp(temp, regOp(inst.b));
+            build.b(ConditionA64::UnsignedLessEqual, fail);
+        }
         else if (inst.b.kind == IrOpKind::Constant)
         {
-            if (size_t(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate)
+            if (intOp(inst.b) == 0)
+            {
+                build.cbz(temp, fail);
+            }
+            else if (size_t(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate)
             {
                 build.cmp(temp, uint16_t(intOp(inst.b)));
+                build.b(ConditionA64::UnsignedLessEqual, fail);
             }
             else
             {
                 RegisterA64 temp2 = regs.allocTemp(KindA64::w);
                 build.mov(temp2, intOp(inst.b));
                 build.cmp(temp, temp2);
+                build.b(ConditionA64::UnsignedLessEqual, fail);
             }
         }
         else
             LUAU_ASSERT(!"Unsupported instruction form");
 
-        build.b(ConditionA64::UnsignedLessEqual, labelOp(inst.c));
+        if (abort.id)
+            emitAbort(build, abort);
         break;
     }
     case IrCmd::JUMP_SLOT_MATCH:
     case IrCmd::CHECK_SLOT_MATCH:
     {
-        Label& mismatch = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? labelOp(inst.d) : labelOp(inst.c);
+        Label abort; // used when guard aborts execution
+        const IrOp& mismatchOp = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? inst.d : inst.c;
+        Label& mismatch = mismatchOp.kind == IrOpKind::Undef ? abort : labelOp(mismatchOp);
 
         RegisterA64 temp1 = regs.allocTemp(KindA64::x);
         RegisterA64 temp1w = castReg(KindA64::w, temp1);
 
@@ -1081,15 +1147,21 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
 
         if (inst.cmd == IrCmd::JUMP_SLOT_MATCH)
             jumpOrFallthrough(blockOp(inst.c), next);
+        else if (abort.id)
+            emitAbort(build, abort);
         break;
     }
     case IrCmd::CHECK_NODE_NO_NEXT:
     {
+        Label abort; // used when guard aborts execution
         RegisterA64 temp = regs.allocTemp(KindA64::w);
 
         build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, key) + kOffsetOfTKeyTagNext));
         build.lsr(temp, temp, kTKeyTagBits);
-        build.cbnz(temp, labelOp(inst.b));
+        build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
+
+        if (abort.id)
+            emitAbort(build, abort);
         break;
     }
     case IrCmd::INTERRUPT:

@@ -1762,11 +1834,6 @@ uint8_t IrLoweringA64::tagOp(IrOp op) const
     return function.tagOp(op);
 }
 
-bool IrLoweringA64::boolOp(IrOp op) const
-{
-    return function.boolOp(op);
-}
-
 int IrLoweringA64::intOp(IrOp op) const
 {
     return function.intOp(op);

@@ -48,7 +48,6 @@ struct IrLoweringA64
     // Operand data lookup helpers
     IrConst constOp(IrOp op) const;
     uint8_t tagOp(IrOp op) const;
-    bool boolOp(IrOp op) const;
     int intOp(IrOp op) const;
     unsigned uintOp(IrOp op) const;
     double doubleOp(IrOp op) const;

@@ -575,14 +575,6 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         jumpOnAnyCmpFallback(regs, build, vmRegOp(inst.a), vmRegOp(inst.b), conditionOp(inst.c), labelOp(inst.d));
         jumpOrFallthrough(blockOp(inst.e), next);
         break;
-    case IrCmd::JUMP_SLOT_MATCH:
-    {
-        ScopedRegX64 tmp{regs, SizeX64::qword};
-
-        jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.d));
-        jumpOrFallthrough(blockOp(inst.c), next);
-        break;
-    }
     case IrCmd::TABLE_LEN:
     {
         IrCallWrapperX64 callWrap(regs, build, index);

@@ -647,6 +639,9 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
             callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaT_gettm)]);
         }
 
+        build.test(rax, rax);
+        build.jcc(ConditionX64::Zero, labelOp(inst.c)); // No tag method
+
         inst.regX64 = regs.takeReg(rax, index);
         break;
     }
@@ -782,7 +777,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         }
         else if (inst.c.kind == IrOpKind::Constant)
         {
-            TValue n;
+            TValue n = {};
             setnvalue(&n, uintOp(inst.c));
             callGetTable(regs, build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
         }

@@ -798,7 +793,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         }
         else if (inst.c.kind == IrOpKind::Constant)
         {
-            TValue n;
+            TValue n = {};
             setnvalue(&n, uintOp(inst.c));
             callSetTable(regs, build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
         }

@@ -817,24 +812,12 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         callWrap.addArgument(SizeX64::qword, rState);
         callWrap.addArgument(SizeX64::qword, qword[tmp1.release() + offsetof(Closure, env)]);
         callWrap.addArgument(SizeX64::qword, rConstants);
+        callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.a)));
         callWrap.addArgument(SizeX64::dword, uintOp(inst.b));
         callWrap.addArgument(SizeX64::dword, 0);
         callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_getimport)]);
 
         emitUpdateBase(build);
-
-        ScopedRegX64 tmp2{regs, SizeX64::qword};
-
-        // setobj2s(L, ra, L->top - 1)
-        build.mov(tmp2.reg, qword[rState + offsetof(lua_State, top)]);
-        build.sub(tmp2.reg, sizeof(TValue));
-
-        ScopedRegX64 tmp3{regs, SizeX64::xmmword};
-        build.vmovups(tmp3.reg, xmmword[tmp2.reg]);
-        build.vmovups(luauReg(vmRegOp(inst.a)), tmp3.reg);
-
-        // L->top--
-        build.mov(qword[rState + offsetof(lua_State, top)], tmp2.reg);
         break;
     }
     case IrCmd::CONCAT:

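[Note, not part of the commit: both the A64 and x64 GET_IMPORT lowerings now pass the destination register's address directly, matching the luaV_getimport signature change in NativeState.h further below (it gained a StkId result parameter). That removes the old epilogue that copied the result down from the stack top and decremented L->top. Before/after call shape, sketched:

    // before: result is pushed onto the VM stack, then copied down
    //   luaV_getimport(L, env, k, id, /* propagatenil= */ false);
    //   setobj2s(L, ra, L->top - 1);
    //   L->top--;
    //
    // after: result is written straight into ra
    //   luaV_getimport(L, env, k, ra, id, /* propagatenil= */ false);
]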
@@ -897,15 +880,15 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         break;
     case IrCmd::CHECK_TAG:
         build.cmp(memRegTagOp(inst.a), tagOp(inst.b));
-        build.jcc(ConditionX64::NotEqual, labelOp(inst.c));
+        jumpOrAbortOnUndef(ConditionX64::NotEqual, ConditionX64::Equal, inst.c);
         break;
     case IrCmd::CHECK_READONLY:
         build.cmp(byte[regOp(inst.a) + offsetof(Table, readonly)], 0);
-        build.jcc(ConditionX64::NotEqual, labelOp(inst.b));
+        jumpOrAbortOnUndef(ConditionX64::NotEqual, ConditionX64::Equal, inst.b);
         break;
     case IrCmd::CHECK_NO_METATABLE:
         build.cmp(qword[regOp(inst.a) + offsetof(Table, metatable)], 0);
-        build.jcc(ConditionX64::NotEqual, labelOp(inst.b));
+        jumpOrAbortOnUndef(ConditionX64::NotEqual, ConditionX64::Equal, inst.b);
         break;
     case IrCmd::CHECK_SAFE_ENV:
     {
@@ -914,7 +897,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         build.mov(tmp.reg, sClosure);
         build.mov(tmp.reg, qword[tmp.reg + offsetof(Closure, env)]);
         build.cmp(byte[tmp.reg + offsetof(Table, safeenv)], 0);
-        build.jcc(ConditionX64::Equal, labelOp(inst.a));
+        jumpOrAbortOnUndef(ConditionX64::Equal, ConditionX64::NotEqual, inst.a);
         break;
     }
     case IrCmd::CHECK_ARRAY_SIZE:

@@ -925,13 +908,44 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
         else
             LUAU_ASSERT(!"Unsupported instruction form");
 
-        build.jcc(ConditionX64::BelowEqual, labelOp(inst.c));
+        jumpOrAbortOnUndef(ConditionX64::BelowEqual, ConditionX64::NotBelowEqual, inst.c);
         break;
+    case IrCmd::JUMP_SLOT_MATCH:
     case IrCmd::CHECK_SLOT_MATCH:
     {
+        Label abort; // Used when guard aborts execution
+        const IrOp& mismatchOp = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? inst.d : inst.c;
+        Label& mismatch = mismatchOp.kind == IrOpKind::Undef ? abort : labelOp(mismatchOp);
+
         ScopedRegX64 tmp{regs, SizeX64::qword};
 
-        jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.c));
+        // Check if node key tag is a string
+        build.mov(dwordReg(tmp.reg), luauNodeKeyTag(regOp(inst.a)));
+        build.and_(dwordReg(tmp.reg), kTKeyTagMask);
+        build.cmp(dwordReg(tmp.reg), LUA_TSTRING);
+        build.jcc(ConditionX64::NotEqual, mismatch);
+
+        // Check that node key value matches the expected one
+        build.mov(tmp.reg, luauConstantValue(vmConstOp(inst.b)));
+        build.cmp(tmp.reg, luauNodeKeyValue(regOp(inst.a)));
+        build.jcc(ConditionX64::NotEqual, mismatch);
+
+        // Check that node value is not nil
+        build.cmp(dword[regOp(inst.a) + offsetof(LuaNode, val) + offsetof(TValue, tt)], LUA_TNIL);
+        build.jcc(ConditionX64::Equal, mismatch);
+
+        if (inst.cmd == IrCmd::JUMP_SLOT_MATCH)
+        {
+            jumpOrFallthrough(blockOp(inst.c), next);
+        }
+        else if (mismatchOp.kind == IrOpKind::Undef)
+        {
+            Label skip;
+            build.jmp(skip);
+            build.setLabel(abort);
+            build.ud2();
+            build.setLabel(skip);
+        }
         break;
     }
     case IrCmd::CHECK_NODE_NO_NEXT:

@@ -940,7 +954,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
 
         build.mov(tmp.reg, dword[regOp(inst.a) + offsetof(LuaNode, key) + kOffsetOfTKeyTagNext]);
         build.shr(tmp.reg, kTKeyTagBits);
-        build.jcc(ConditionX64::NotZero, labelOp(inst.b));
+        jumpOrAbortOnUndef(ConditionX64::NotZero, ConditionX64::Zero, inst.b);
         break;
     }
     case IrCmd::INTERRUPT:

@@ -1356,6 +1370,21 @@ void IrLoweringX64::jumpOrFallthrough(IrBlock& target, IrBlock& next)
         build.jmp(target.label);
 }
 
+void IrLoweringX64::jumpOrAbortOnUndef(ConditionX64 cond, ConditionX64 condInverse, IrOp targetOrUndef)
+{
+    if (targetOrUndef.kind == IrOpKind::Undef)
+    {
+        Label skip;
+        build.jcc(condInverse, skip);
+        build.ud2();
+        build.setLabel(skip);
+    }
+    else
+    {
+        build.jcc(cond, labelOp(targetOrUndef));
+    }
+}
+
 OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op)
 {
     switch (op.kind)

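[Note, not part of the commit: for an undef target the helper inverts the guard condition to skip over a one-instruction trap, so the passing path costs only a single not-taken jcc. A standalone model of the decision (Target and jumpOrAbortModel are hypothetical names):

    #include <cstdlib>

    enum class Target { Block, Undef };

    // 'failed' models the guard's failure condition being taken.
    void jumpOrAbortModel(bool failed, Target target)
    {
        if (target == Target::Undef)
        {
            if (!failed) // the inverse condition: skip the trap
                return;
            abort(); // ud2 equivalent
        }
        else if (failed)
        {
            // jump to the recovery block
        }
    }
]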
@@ -1428,11 +1457,6 @@ uint8_t IrLoweringX64::tagOp(IrOp op) const
     return function.tagOp(op);
 }
 
-bool IrLoweringX64::boolOp(IrOp op) const
-{
-    return function.boolOp(op);
-}
-
 int IrLoweringX64::intOp(IrOp op) const
 {
     return function.intOp(op);

@@ -34,6 +34,7 @@ struct IrLoweringX64
 
     bool isFallthroughBlock(IrBlock target, IrBlock next);
     void jumpOrFallthrough(IrBlock& target, IrBlock& next);
+    void jumpOrAbortOnUndef(ConditionX64 cond, ConditionX64 condInverse, IrOp targetOrUndef);
 
     void storeDoubleAsFloat(OperandX64 dst, IrOp src);
 
@@ -45,7 +46,6 @@ struct IrLoweringX64
 
     IrConst constOp(IrOp op) const;
     uint8_t tagOp(IrOp op) const;
-    bool boolOp(IrOp op) const;
     int intOp(IrOp op) const;
     unsigned uintOp(IrOp op) const;
     double doubleOp(IrOp op) const;

@@ -194,7 +194,7 @@ void IrRegAllocX64::preserve(IrInst& inst)
             else if (spill.valueKind == IrValueKind::Tag || spill.valueKind == IrValueKind::Int)
                 build.mov(dword[sSpillArea + i * 8], inst.regX64);
             else
-                LUAU_ASSERT(!"unsupported value kind");
+                LUAU_ASSERT(!"Unsupported value kind");
 
             usedSpillSlots.set(i);
 
@@ -318,7 +318,7 @@ unsigned IrRegAllocX64::findSpillStackSlot(IrValueKind valueKind)
             return i;
     }
 
-    LUAU_ASSERT(!"nowhere to spill");
+    LUAU_ASSERT(!"Nowhere to spill");
     return ~0u;
 }
 

@@ -5,10 +5,10 @@
 #include "Luau/IrBuilder.h"
 #include "Luau/IrUtils.h"
 
-#include "CustomExecUtils.h"
 #include "IrTranslateBuiltins.h"
 
 #include "lobject.h"
+#include "lstate.h"
 #include "ltm.h"
 
 namespace Luau

@@ -366,7 +366,7 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
         result = build.inst(IrCmd::INVOKE_LIBM, build.constUint(LBF_MATH_POW), vb, vc);
         break;
     default:
-        LUAU_ASSERT(!"unsupported binary op");
+        LUAU_ASSERT(!"Unsupported binary op");
     }
 }
 
@@ -1068,13 +1068,13 @@ void translateInstCapture(IrBuilder& build, const Instruction* pc, int pcpos)
     switch (type)
     {
     case LCT_VAL:
-        build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constBool(false));
+        build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constUint(0));
         break;
     case LCT_REF:
-        build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constBool(true));
+        build.inst(IrCmd::CAPTURE, build.vmReg(index), build.constUint(1));
         break;
     case LCT_UPVAL:
-        build.inst(IrCmd::CAPTURE, build.vmUpvalue(index), build.constBool(false));
+        build.inst(IrCmd::CAPTURE, build.vmUpvalue(index), build.constUint(0));
         break;
     default:
         LUAU_ASSERT(!"Unknown upvalue capture type");

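[Note, not part of the commit: with IrConstKind::Bool removed elsewhere in this commit, the capture kind now travels as an unsigned constant. A tiny model of the convention (the enum name is hypothetical; the IR just stores 0 or 1):

    enum CaptureKind : unsigned
    {
        CaptureValue = 0,     // LCT_VAL and LCT_UPVAL
        CaptureReference = 1, // LCT_REF
    };
]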
@@ -1,6 +1,8 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #pragma once
 
+#include "Luau/Bytecode.h"
+
 #include <stdint.h>
 
 #include "ltm.h"

@@ -64,5 +66,38 @@ void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos);
 void translateInstAndX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c);
 void translateInstOrX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c);
 
+inline int getOpLength(LuauOpcode op)
+{
+    switch (op)
+    {
+    case LOP_GETGLOBAL:
+    case LOP_SETGLOBAL:
+    case LOP_GETIMPORT:
+    case LOP_GETTABLEKS:
+    case LOP_SETTABLEKS:
+    case LOP_NAMECALL:
+    case LOP_JUMPIFEQ:
+    case LOP_JUMPIFLE:
+    case LOP_JUMPIFLT:
+    case LOP_JUMPIFNOTEQ:
+    case LOP_JUMPIFNOTLE:
+    case LOP_JUMPIFNOTLT:
+    case LOP_NEWTABLE:
+    case LOP_SETLIST:
+    case LOP_FORGLOOP:
+    case LOP_LOADKX:
+    case LOP_FASTCALL2:
+    case LOP_FASTCALL2K:
+    case LOP_JUMPXEQKNIL:
+    case LOP_JUMPXEQKB:
+    case LOP_JUMPXEQKN:
+    case LOP_JUMPXEQKS:
+        return 2;
+
+    default:
+        return 1;
+    }
+}
+
 } // namespace CodeGen
 } // namespace Luau

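[Note, not part of the commit: getOpLength moved here from the deleted CustomExecUtils.h. The listed opcodes are the ones followed by a 32-bit AUX word in Luau bytecode, so a decoder must step by two instruction slots for them. A sketch of walking a stream ('code' and 'size' are hypothetical inputs; LUAU_INSN_OP is the opcode accessor from Luau/Bytecode.h):

    void walk(const Instruction* code, int size)
    {
        for (int i = 0; i < size; i += getOpLength(LuauOpcode(LUAU_INSN_OP(code[i]))))
        {
            // visit code[i]
        }
    }
]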
@@ -356,7 +356,11 @@ void applySubstitutions(IrFunction& function, IrOp& op)
             src.useCount--;
 
             if (src.useCount == 0)
+            {
                 src.cmd = IrCmd::NOP;
+                removeUse(function, src.a);
+                src.a = {};
+            }
         }
     }
 }

@@ -396,7 +400,7 @@ bool compare(double a, double b, IrCondition cond)
     case IrCondition::NotGreaterEqual:
         return !(a >= b);
     default:
-        LUAU_ASSERT(!"unsupported conidtion");
+        LUAU_ASSERT(!"Unsupported condition");
     }
 
     return false;

@@ -4,7 +4,6 @@
 #include "Luau/UnwindBuilder.h"
 
 #include "CodeGenUtils.h"
-#include "CustomExecUtils.h"
 
 #include "lbuiltins.h"
 #include "lgc.h"

@@ -38,7 +38,7 @@ struct NativeContext
     void (*luaV_prepareFORN)(lua_State* L, StkId plimit, StkId pstep, StkId pinit) = nullptr;
     void (*luaV_gettable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
     void (*luaV_settable)(lua_State* L, const TValue* t, TValue* key, StkId val) = nullptr;
-    void (*luaV_getimport)(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil) = nullptr;
+    void (*luaV_getimport)(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil) = nullptr;
     void (*luaV_concat)(lua_State* L, int total, int last) = nullptr;
 
     int (*luaH_getn)(Table* t) = nullptr;

@@ -12,6 +12,7 @@
 #include <vector>
 
 LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
+LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
 
 namespace Luau
 {
@@ -57,6 +58,12 @@ struct ConstPropState
         return 0xff;
     }
 
+    void updateTag(IrOp op, uint8_t tag)
+    {
+        if (RegisterInfo* info = tryGetRegisterInfo(op))
+            info->tag = tag;
+    }
+
     void saveTag(IrOp op, uint8_t tag)
     {
         if (RegisterInfo* info = tryGetRegisterInfo(op))

@@ -139,8 +146,15 @@
 
     void invalidateRegisterRange(int firstReg, int count)
     {
-        for (int i = firstReg; i < firstReg + count && i <= maxReg; ++i)
-            invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
+        if (count == -1)
+        {
+            invalidateRegistersFrom(firstReg);
+        }
+        else
+        {
+            for (int i = firstReg; i < firstReg + count && i <= maxReg; ++i)
+                invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
+        }
     }
 
     void invalidateCapturedRegisters()

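[Note, not part of the commit: the -1 count mirrors the convention documented for INVOKE_FASTCALL earlier ('-1 to use all arguments up to stack top'); when the register range is open-ended, everything from firstReg upward is invalidated via invalidateRegistersFrom.

    // Sketch: a call site can now pass -1 when the argument count is dynamic.
    // state.invalidateRegisterRange(ra, -1); // everything from ra upward
]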
@@ -202,7 +216,7 @@
         if (RegisterLink* link = instLink.find(instOp.index))
         {
             // Check that the target register hasn't changed the value
-            if (link->version > regs[link->reg].version)
+            if (link->version < regs[link->reg].version)
                 return nullptr;
 
             return link;

@@ -229,9 +243,18 @@
             return;
 
         if (uint32_t* prevIdx = valueMap.find(inst))
-            substitute(function, inst, IrOp{IrOpKind::Inst, *prevIdx});
-        else
-            valueMap[inst] = instIdx;
+        {
+            const IrInst& prev = function.instructions[*prevIdx];
+
+            // Previous load might have been removed as unused
+            if (prev.useCount != 0)
+            {
+                substitute(function, inst, IrOp{IrOpKind::Inst, *prevIdx});
+                return;
+            }
+        }
+
+        valueMap[inst] = instIdx;
     }
 
     // Vm register load can be replaced by a previous load of the same version of the register

@@ -253,23 +276,28 @@
         // Check if there is a value that already has this version of the register
         if (uint32_t* prevIdx = valueMap.find(versionedLoad))
         {
-            // Previous value might not be linked to a register yet
-            // For example, it could be a NEW_TABLE stored into a register and we might need to track guards made with this value
-            if (!instLink.contains(*prevIdx))
-                createRegLink(*prevIdx, loadInst.a);
+            const IrInst& prev = function.instructions[*prevIdx];
 
-            // Substitute load instructon with the previous value
-            substitute(function, loadInst, IrOp{IrOpKind::Inst, *prevIdx});
-        }
-        else
-        {
-            uint32_t instIdx = function.getInstIndex(loadInst);
+            // Previous load might have been removed as unused
+            if (prev.useCount != 0)
+            {
+                // Previous value might not be linked to a register yet
+                // For example, it could be a NEW_TABLE stored into a register and we might need to track guards made with this value
+                if (!instLink.contains(*prevIdx))
+                    createRegLink(*prevIdx, loadInst.a);
 
-            // Record load of this register version for future substitution
-            valueMap[versionedLoad] = instIdx;
+                // Substitute load instructon with the previous value
+                substitute(function, loadInst, IrOp{IrOpKind::Inst, *prevIdx});
+                return;
+            }
+        }
 
-            createRegLink(instIdx, loadInst.a);
-        }
+        uint32_t instIdx = function.getInstIndex(loadInst);
+
+        // Record load of this register version for future substitution
+        valueMap[versionedLoad] = instIdx;
+
+        createRegLink(instIdx, loadInst.a);
     }
 
     // VM register loads can use the value that was stored in the same Vm register earlier

@@ -449,9 +477,16 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
         }
 
         if (state.tryGetTag(source) == value)
-            kill(function, inst);
+        {
+            if (FFlag::DebugLuauAbortingChecks)
+                replace(function, block, index, {IrCmd::CHECK_TAG, inst.a, inst.b, build.undef()});
+            else
+                kill(function, inst);
+        }
         else
+        {
             state.saveTag(source, value);
+        }
     }
     else
     {
@@ -619,13 +654,20 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
         if (uint8_t tag = state.tryGetTag(inst.a); tag != 0xff)
         {
             if (tag == b)
-                kill(function, inst);
+            {
+                if (FFlag::DebugLuauAbortingChecks)
+                    replace(function, inst.c, build.undef());
+                else
+                    kill(function, inst);
+            }
             else
+            {
                 replace(function, block, index, {IrCmd::JUMP, inst.c}); // Shows a conflict in assumptions on this path
+            }
         }
         else
         {
-            state.saveTag(inst.a, b); // We can assume the tag value going forward
+            state.updateTag(inst.a, b); // We can assume the tag value going forward
         }
         break;
     }
@@ -633,25 +675,46 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
         if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a))
         {
             if (info->knownNotReadonly)
-                kill(function, inst);
+            {
+                if (FFlag::DebugLuauAbortingChecks)
+                    replace(function, inst.b, build.undef());
+                else
+                    kill(function, inst);
+            }
             else
+            {
                 info->knownNotReadonly = true;
+            }
         }
         break;
     case IrCmd::CHECK_NO_METATABLE:
         if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a))
         {
             if (info->knownNoMetatable)
-                kill(function, inst);
+            {
+                if (FFlag::DebugLuauAbortingChecks)
+                    replace(function, inst.b, build.undef());
+                else
+                    kill(function, inst);
+            }
             else
+            {
                 info->knownNoMetatable = true;
+            }
         }
         break;
     case IrCmd::CHECK_SAFE_ENV:
         if (state.inSafeEnv)
-            kill(function, inst);
+        {
+            if (FFlag::DebugLuauAbortingChecks)
+                replace(function, inst.a, build.undef());
+            else
+                kill(function, inst);
+        }
        else
+        {
             state.inSafeEnv = true;
+        }
         break;
     case IrCmd::CHECK_GC:
         // It is enough to perform a GC check once in a block

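[Note, not part of the commit: this is where the new DebugLuauAbortingChecks flag pays off. A guard the optimizer has proven redundant is normally deleted; under the flag it is kept with an undef failure target instead, so a wrong optimizer assumption becomes a hard abort at runtime rather than silent miscompilation. A standalone model of the policy (names are hypothetical):

    enum class Action { Keep, Kill, MakeAborting };

    Action foldGuard(bool provenRedundant, bool abortingChecks)
    {
        if (!provenRedundant)
            return Action::Keep; // record the assumption and keep the guard

        // Under the debug flag the guard survives with an undef target,
        // validating the optimizer's proof on every execution.
        return abortingChecks ? Action::MakeAborting : Action::Kill;
    }
]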
@@ -300,8 +300,9 @@ enum LuauOpcode
     // A: target register (see FORGLOOP for register layout)
     LOP_FORGPREP_NEXT,
 
-    // removed in v3
-    LOP_DEP_FORGLOOP_NEXT,
+    // NATIVECALL: start executing new function in native code
+    // this is a pseudo-instruction that is never emitted by bytecode compiler, but can be constructed at runtime to accelerate native code dispatch
+    LOP_NATIVECALL,
 
     // GETVARARGS: copy variables into the target register from vararg storage for current function
     // A: target register

@@ -14,8 +14,6 @@ inline bool isFlagExperimental(const char* flag)
"LuauInstantiateInSubtyping", // requires some fixes to lua-apps code
"LuauTypecheckTypeguards", // requires some fixes to lua-apps code (CLI-67030)
"LuauTinyControlFlowAnalysis", // waiting for updates to packages depended by internal builtin plugins
"LuauUnifyTwoOptions", // requires some fixes to lua-apps code

// makes sure we always have at least one entry
nullptr,
};

@@ -252,8 +252,7 @@ BuiltinInfo getBuiltinInfo(int bfid)
return {-1, -1};

case LBF_ASSERT:
return {-1, -1};
; // assert() returns all values when first value is truthy
return {-1, -1}; // assert() returns all values when first value is truthy

case LBF_MATH_ABS:
case LBF_MATH_ACOS:

@@ -25,7 +25,7 @@ LUAU_FASTINTVARIABLE(LuauCompileInlineThreshold, 25)
LUAU_FASTINTVARIABLE(LuauCompileInlineThresholdMaxBoost, 300)
LUAU_FASTINTVARIABLE(LuauCompileInlineDepth, 5)

LUAU_FASTFLAGVARIABLE(LuauCompileLimitInsns, false)
LUAU_FASTFLAGVARIABLE(LuauCompileInlineDefer, false)

namespace Luau
{

@@ -250,7 +250,7 @@ struct Compiler
popLocals(0);

if (FFlag::LuauCompileLimitInsns && bytecode.getInstructionCount() > kMaxInstructionCount)
if (bytecode.getInstructionCount() > kMaxInstructionCount)
CompileError::raise(func->location, "Exceeded function instruction limit; split the function into parts to compile");

bytecode.endFunction(uint8_t(stackSize), uint8_t(upvals.size()));

@@ -559,10 +559,19 @@ struct Compiler

size_t oldLocals = localStack.size();

// note that we push the frame early; this is needed to block recursive inline attempts
inlineFrames.push_back({func, oldLocals, target, targetCount});
std::vector<InlineArg> args;
if (FFlag::LuauCompileInlineDefer)
{
args.reserve(func->args.size);
}
else
{
// note that we push the frame early; this is needed to block recursive inline attempts
inlineFrames.push_back({func, oldLocals, target, targetCount});
}

// evaluate all arguments; note that we don't emit code for constant arguments (relying on constant folding)
// note that compiler state (variable registers/values) does not change here - we defer that to a separate loop below to handle nested calls
for (size_t i = 0; i < func->args.size; ++i)
{
AstLocal* var = func->args.data[i];
@@ -581,8 +590,16 @@ struct Compiler
else
LUAU_ASSERT(!"Unexpected expression type");

for (size_t j = i; j < func->args.size; ++j)
pushLocal(func->args.data[j], uint8_t(reg + (j - i)));
if (FFlag::LuauCompileInlineDefer)
{
for (size_t j = i; j < func->args.size; ++j)
args.push_back({func->args.data[j], uint8_t(reg + (j - i))});
}
else
{
for (size_t j = i; j < func->args.size; ++j)
pushLocal(func->args.data[j], uint8_t(reg + (j - i)));
}

// all remaining function arguments have been allocated and assigned to
break;

@@ -597,17 +614,26 @@ struct Compiler
else
bytecode.emitABC(LOP_LOADNIL, reg, 0, 0);

pushLocal(var, reg);
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, reg});
else
pushLocal(var, reg);
}
else if (arg == nullptr)
{
// since the argument is not mutated, we can simply fold the value into the expressions that need it
locstants[var] = {Constant::Type_Nil};
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, kInvalidReg, {Constant::Type_Nil}});
else
locstants[var] = {Constant::Type_Nil};
}
else if (const Constant* cv = constants.find(arg); cv && cv->type != Constant::Type_Unknown)
{
// since the argument is not mutated, we can simply fold the value into the expressions that need it
locstants[var] = *cv;
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, kInvalidReg, *cv});
else
locstants[var] = *cv;
}
else
{
@@ -617,13 +643,20 @@ struct Compiler
// if the argument is a local that isn't mutated, we will simply reuse the existing register
if (int reg = le ? getExprLocalReg(le) : -1; reg >= 0 && (!lv || !lv->written))
{
pushLocal(var, uint8_t(reg));
if (FFlag::LuauCompileInlineDefer)
args.push_back({var, uint8_t(reg)});
else
pushLocal(var, uint8_t(reg));
}
else
{
uint8_t temp = allocReg(arg, 1);
compileExprTemp(arg, temp);
pushLocal(var, temp);

if (FFlag::LuauCompileInlineDefer)
args.push_back({var, temp});
else
pushLocal(var, temp);
}
}
}

@@ -635,6 +668,20 @@ struct Compiler
compileExprAuto(expr->args.data[i], rsi);
}

if (FFlag::LuauCompileInlineDefer)
{
// apply all evaluated arguments to the compiler state
// note: locals use current startpc for debug info, although some of them have been computed earlier; this is similar to compileStatLocal
for (InlineArg& arg : args)
if (arg.value.type == Constant::Type_Unknown)
pushLocal(arg.local, arg.reg);
else
locstants[arg.local] = arg.value;

// the inline frame will be used to compile return statements as well as to reject recursive inlining attempts
inlineFrames.push_back({func, oldLocals, target, targetCount});
}

// fold constant values updated above into expressions in the function body
foldConstants(constants, variables, locstants, builtinsFold, func->body);

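The LuauCompileInlineDefer hunks above switch argument binding from immediate (pushLocal/locstants as each argument is compiled) to two-phase: evaluate every argument into a pending list first, then apply all bindings at once and only then push the inline frame, so a nested call evaluated for argument N cannot observe earlier parameters already bound. A rough standalone sketch of that two-phase shape, with hypothetical names (`PendingArg`, `MiniCompiler`) rather than the real compiler types:

```cpp
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-ins for the compiler's state; not Luau's real types.
struct PendingArg
{
    std::string local;
    int reg;         // -1 when the argument was folded to a constant
    double constant; // only meaningful when reg == -1
};

struct MiniCompiler
{
    std::map<std::string, int> localRegs;
    std::map<std::string, double> localConsts;

    // Phase 1: evaluate arguments, recording bindings without applying them,
    // so evaluation of later arguments can't see earlier parameters bound.
    std::vector<PendingArg> evaluateArgs()
    {
        std::vector<PendingArg> args;
        args.push_back({"a", /* reg */ 4, 0.0}); // computed into a register
        args.push_back({"b", -1, 42.0});         // folded to a constant
        return args;
    }

    // Phase 2: apply all bindings at once (then the real compiler would push
    // the inline frame).
    void applyArgs(const std::vector<PendingArg>& args)
    {
        for (const PendingArg& arg : args)
        {
            if (arg.reg >= 0)
                localRegs[arg.local] = arg.reg;
            else
                localConsts[arg.local] = arg.constant;
        }
    }
};

int main()
{
    MiniCompiler c;
    c.applyArgs(c.evaluateArgs());
}
```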
@@ -3747,6 +3794,14 @@ struct Compiler
AstExpr* untilCondition;
};

struct InlineArg
{
AstLocal* local;

uint8_t reg;
Constant value;
};

struct InlineFrame
{
AstExprFunction* func;

@@ -12,8 +12,6 @@
#include <string.h>
#include <stdio.h>

LUAU_FASTFLAGVARIABLE(LuauFixBreakpointLineSearch, false)

static const char* getfuncname(Closure* f);

static int currentpc(lua_State* L, CallInfo* ci)

@@ -427,22 +425,11 @@ static int getnextline(Proto* p, int line)

int candidate = luaG_getline(p, i);

if (FFlag::LuauFixBreakpointLineSearch)
{
if (candidate == line)
return line;
if (candidate == line)
return line;

if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}
else
{
if (candidate >= line)
{
closest = candidate;
break;
}
}
if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}
}

@@ -451,21 +438,11 @@ static int getnextline(Proto* p, int line)
// Find the closest line number to the intended one.
int candidate = getnextline(p->p[i], line);

if (FFlag::LuauFixBreakpointLineSearch)
{
if (candidate == line)
return line;
if (candidate == line)
return line;

if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}
else
{
if (closest == -1 || (candidate >= line && candidate < closest))
{
closest = candidate;
}
}
if (candidate > line && (closest == -1 || candidate < closest))
closest = candidate;
}

return closest;

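With the flag retired, both getnextline loops settle on the same rule: an exact line hit wins immediately, otherwise take the smallest mapped line greater than the request. A self-contained sketch of that search over a plain line array (`findBreakpointLine` is an illustrative name, not the VM's):

```cpp
#include <cstdio>
#include <vector>

// Returns `line` if some instruction maps to it, otherwise the smallest
// mapped line greater than `line`, or -1 if none exists -- mirroring the
// fixed search above (lines[] stands in for per-instruction line info).
int findBreakpointLine(const std::vector<int>& lines, int line)
{
    int closest = -1;
    for (int candidate : lines)
    {
        if (candidate == line)
            return line;

        if (candidate > line && (closest == -1 || candidate < closest))
            closest = candidate;
    }
    return closest;
}

int main()
{
    std::vector<int> lines = {10, 4, 7, 7, 12}; // not necessarily sorted
    std::printf("%d\n", findBreakpointLine(lines, 7)); // 7 (exact hit)
    std::printf("%d\n", findBreakpointLine(lines, 8)); // 10 (next line up)
}
```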
@@ -31,7 +31,7 @@ Proto* luaF_newproto(lua_State* L)
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;

f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;

@@ -275,6 +275,7 @@ typedef struct Proto
TString* debugname;
uint8_t* debuginsn; // a copy of code[] array with just opcodes

const Instruction* codeentry;
void* execdata;
uintptr_t exectarget;

@@ -219,9 +219,7 @@ lua_State* lua_newstate(lua_Alloc f, void* ud)

g->cb = lua_Callbacks();

#if LUA_CUSTOM_EXECUTION
g->ecb = lua_ExecutionCallbacks();
#endif

g->gcstats = GCStats();

@@ -69,7 +69,7 @@ typedef struct CallInfo

#define LUA_CALLINFO_RETURN (1 << 0) // should the interpreter return after returning from this callinfo? first frame must have this set
#define LUA_CALLINFO_HANDLE (1 << 1) // should the error thrown during execution get handled by continuation from this callinfo? func must be C
#define LUA_CALLINFO_CUSTOM (1 << 2) // should this function be executed using custom execution callback
#define LUA_CALLINFO_NATIVE (1 << 2) // should this function be executed using execution callback for native code

#define curr_func(L) (clvalue(L->ci->func))
#define ci_func(ci) (clvalue((ci)->func))
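The rename keeps the same bit value (1 << 2), so packed frame flags stay compatible; only the name changes from CUSTOM to NATIVE. A tiny sketch of how such frame flags are set and tested — `Frame` and these `CALLINFO_*` copies are stand-ins for illustration, not the VM's header:

```cpp
#include <cassert>

// Same bit assignments as the header above; only the third name changed.
#define CALLINFO_RETURN (1 << 0)
#define CALLINFO_HANDLE (1 << 1)
#define CALLINFO_NATIVE (1 << 2) // was CALLINFO_CUSTOM before this commit

struct Frame
{
    unsigned int flags = 0;
};

int main()
{
    Frame frame;
    frame.flags = CALLINFO_NATIVE; // mark frame as natively executed

    // A dispatcher tests the bit before entering the execution callback.
    assert((frame.flags & CALLINFO_NATIVE) != 0);
    assert((frame.flags & CALLINFO_RETURN) == 0);
}
```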
@@ -211,9 +211,7 @@ typedef struct global_State

lua_Callbacks cb;

#if LUA_CUSTOM_EXECUTION
lua_ExecutionCallbacks ecb;
#endif

void (*udatagc[LUA_UTAG_LIMIT])(lua_State*, void*); // for each userdata tag, a gc callback to be called immediately before freeing memory

|
@ -23,7 +23,8 @@ LUAI_FUNC int luaV_tostring(lua_State* L, StkId obj);
|
|||
LUAI_FUNC void luaV_gettable(lua_State* L, const TValue* t, TValue* key, StkId val);
|
||||
LUAI_FUNC void luaV_settable(lua_State* L, const TValue* t, TValue* key, StkId val);
|
||||
LUAI_FUNC void luaV_concat(lua_State* L, int total, int last);
|
||||
LUAI_FUNC void luaV_getimport(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil);
|
||||
LUAI_FUNC void luaV_getimport(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil);
|
||||
LUAI_FUNC void luaV_getimport_dep(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil);
|
||||
LUAI_FUNC void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit);
|
||||
LUAI_FUNC void luaV_callTM(lua_State* L, int nparams, int res);
|
||||
LUAI_FUNC void luaV_tryfuncTM(lua_State* L, StkId func);
|
||||
|
|
|
@@ -17,6 +17,7 @@
#include <string.h>

LUAU_FASTFLAG(LuauUniformTopHandling)
LUAU_FASTFLAG(LuauGetImportDirect)

// Disable c99-designator to avoid the warning in CGOTO dispatch table
#ifdef __clang__

@@ -101,7 +102,7 @@ LUAU_FASTFLAG(LuauUniformTopHandling)
VM_DISPATCH_OP(LOP_CONCAT), VM_DISPATCH_OP(LOP_NOT), VM_DISPATCH_OP(LOP_MINUS), VM_DISPATCH_OP(LOP_LENGTH), VM_DISPATCH_OP(LOP_NEWTABLE), \
VM_DISPATCH_OP(LOP_DUPTABLE), VM_DISPATCH_OP(LOP_SETLIST), VM_DISPATCH_OP(LOP_FORNPREP), VM_DISPATCH_OP(LOP_FORNLOOP), \
VM_DISPATCH_OP(LOP_FORGLOOP), VM_DISPATCH_OP(LOP_FORGPREP_INEXT), VM_DISPATCH_OP(LOP_DEP_FORGLOOP_INEXT), VM_DISPATCH_OP(LOP_FORGPREP_NEXT), \
VM_DISPATCH_OP(LOP_DEP_FORGLOOP_NEXT), VM_DISPATCH_OP(LOP_GETVARARGS), VM_DISPATCH_OP(LOP_DUPCLOSURE), VM_DISPATCH_OP(LOP_PREPVARARGS), \
VM_DISPATCH_OP(LOP_NATIVECALL), VM_DISPATCH_OP(LOP_GETVARARGS), VM_DISPATCH_OP(LOP_DUPCLOSURE), VM_DISPATCH_OP(LOP_PREPVARARGS), \
VM_DISPATCH_OP(LOP_LOADKX), VM_DISPATCH_OP(LOP_JUMPX), VM_DISPATCH_OP(LOP_FASTCALL), VM_DISPATCH_OP(LOP_COVERAGE), \
VM_DISPATCH_OP(LOP_CAPTURE), VM_DISPATCH_OP(LOP_DEP_JUMPIFEQK), VM_DISPATCH_OP(LOP_DEP_JUMPIFNOTEQK), VM_DISPATCH_OP(LOP_FASTCALL1), \
VM_DISPATCH_OP(LOP_FASTCALL2), VM_DISPATCH_OP(LOP_FASTCALL2K), VM_DISPATCH_OP(LOP_FORGPREP), VM_DISPATCH_OP(LOP_JUMPXEQKNIL), \

@@ -210,7 +211,7 @@ static void luau_execute(lua_State* L)
LUAU_ASSERT(!isblack(obj2gco(L))); // we don't use luaC_threadbarrier because active threads never turn black

#if LUA_CUSTOM_EXECUTION
if ((L->ci->flags & LUA_CALLINFO_CUSTOM) && !SingleStep)
if ((L->ci->flags & LUA_CALLINFO_NATIVE) && !SingleStep)
{
Proto* p = clvalue(L->ci->func)->l.p;
LUAU_ASSERT(p->execdata);

@@ -432,12 +433,20 @@ reentry:
{
uint32_t aux = *pc++;

VM_PROTECT(luaV_getimport(L, cl->env, k, aux, /* propagatenil= */ false));
ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack
if (FFlag::LuauGetImportDirect)
{
VM_PROTECT(luaV_getimport(L, cl->env, k, ra, aux, /* propagatenil= */ false));
VM_NEXT();
}
else
{
VM_PROTECT(luaV_getimport_dep(L, cl->env, k, aux, /* propagatenil= */ false));
ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack

setobj2s(L, ra, L->top - 1);
L->top--;
VM_NEXT();
setobj2s(L, ra, L->top - 1);
L->top--;
VM_NEXT();
}
}
}

@@ -954,21 +963,11 @@ reentry:
setnilvalue(argi++); // complete missing arguments
L->top = p->is_vararg ? argi : ci->top;

#if LUA_CUSTOM_EXECUTION
if (LUAU_UNLIKELY(p->execdata && !SingleStep))
{
ci->flags = LUA_CALLINFO_CUSTOM;
ci->savedpc = p->code;

if (L->global->ecb.enter(L, p) == 1)
goto reentry;
else
goto exit;
}
#endif

// reentry
pc = p->code;
// codeentry may point to NATIVECALL instruction when proto is compiled to native code
// this will result in execution continuing in native code, and is equivalent to if (p->execdata) but has no additional overhead
// note that p->codeentry may point *outside* of p->code..p->code+p->sizecode, but that pointer never gets saved to savedpc.
pc = SingleStep ? p->code : p->codeentry;
cl = ccl;
base = L->base;
k = p->k;
@@ -1055,7 +1054,7 @@ reentry:
Proto* nextproto = nextcl->l.p;

#if LUA_CUSTOM_EXECUTION
if (LUAU_UNLIKELY((cip->flags & LUA_CALLINFO_CUSTOM) && !SingleStep))
if (LUAU_UNLIKELY((cip->flags & LUA_CALLINFO_NATIVE) && !SingleStep))
{
if (L->global->ecb.enter(L, nextproto) == 1)
goto reentry;

|
|||
VM_NEXT();
|
||||
}
|
||||
|
||||
VM_CASE(LOP_DEP_FORGLOOP_NEXT)
|
||||
VM_CASE(LOP_NATIVECALL)
|
||||
{
|
||||
LUAU_ASSERT(!"Unsupported deprecated opcode");
|
||||
Proto* p = cl->l.p;
|
||||
LUAU_ASSERT(p->execdata);
|
||||
|
||||
CallInfo* ci = L->ci;
|
||||
ci->flags = LUA_CALLINFO_NATIVE;
|
||||
ci->savedpc = p->code;
|
||||
|
||||
#if LUA_CUSTOM_EXECUTION
|
||||
if (L->global->ecb.enter(L, p) == 1)
|
||||
goto reentry;
|
||||
else
|
||||
goto exit;
|
||||
#else
|
||||
LUAU_ASSERT(!"Opcode is only valid when LUA_CUSTOM_EXECUTION is defined");
|
||||
LUAU_UNREACHABLE();
|
||||
#endif
|
||||
}
|
||||
|
||||
VM_CASE(LOP_GETVARARGS)
|
||||
|
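The NATIVECALL case also spells out the execution-callback contract used throughout this commit: ecb.enter returning 1 resumes the interpreter loop (goto reentry), anything else unwinds (goto exit). A minimal sketch of a loop honoring that return-code protocol — the names here are invented for illustration:

```cpp
#include <cstdio>

// Hypothetical callback mirroring the ecb.enter contract used above:
// return 1 to continue interpreting (goto reentry), 0 to leave the loop.
using EnterCallback = int (*)(int protoId);

static int fakeNativeEnter(int protoId)
{
    std::printf("running proto %d natively\n", protoId);
    return protoId % 2; // pretend odd protos fall back to the interpreter
}

void runLoop(EnterCallback enter)
{
    for (int protoId = 0; protoId < 4; ++protoId)
    {
        if (enter(protoId) == 1)
            std::printf("  -> reentry: interpreter resumes\n");
        else
            std::printf("  -> exit: unwinding to the caller\n");
    }
}

int main()
{
    runLoop(fakeNativeEnter);
}
```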
@@ -2896,7 +2909,7 @@ int luau_precall(lua_State* L, StkId func, int nresults)

#if LUA_CUSTOM_EXECUTION
if (p->execdata)
ci->flags = LUA_CALLINFO_CUSTOM;
ci->flags = LUA_CALLINFO_NATIVE;
#endif

return PCRLUA;

@@ -13,6 +13,8 @@

#include <string.h>

LUAU_FASTFLAGVARIABLE(LuauGetImportDirect, false)

// TODO: RAII deallocation doesn't work for longjmp builds if a memory error happens
template<typename T>
struct TempBuffer

@@ -40,8 +42,45 @@ struct TempBuffer
}
};

void luaV_getimport(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil)
void luaV_getimport(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil)
{
int count = id >> 30;
LUAU_ASSERT(count > 0);

int id0 = int(id >> 20) & 1023;
int id1 = int(id >> 10) & 1023;
int id2 = int(id) & 1023;

// after the first call to luaV_gettable, res may be invalid, and env may (sometimes) be garbage collected
// we take care to not use env again and to restore res before every consecutive use
ptrdiff_t resp = savestack(L, res);

// global lookup for id0
TValue g;
sethvalue(L, &g, env);
luaV_gettable(L, &g, &k[id0], res);

// table lookup for id1
if (count < 2)
return;

res = restorestack(L, resp);
if (!propagatenil || !ttisnil(res))
luaV_gettable(L, res, &k[id1], res);

// table lookup for id2
if (count < 3)
return;

res = restorestack(L, resp);
if (!propagatenil || !ttisnil(res))
luaV_gettable(L, res, &k[id2], res);
}

void luaV_getimport_dep(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil)
{
LUAU_ASSERT(!FFlag::LuauGetImportDirect);

int count = id >> 30;
int id0 = count > 0 ? int(id >> 20) & 1023 : -1;
int id1 = count > 1 ? int(id >> 10) & 1023 : -1;
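The new luaV_getimport makes the import id layout explicit: the top 2 bits carry the path length (1-3) and each segment is a 10-bit constant-table index. A self-contained sketch of just the decoding, mirroring the shifts and masks above (`ImportPath` and `decodeImportId` are illustrative names):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Decodes the packed import id used by GETIMPORT: the top 2 bits hold the
// path length (1..3), followed by three 10-bit constant-table indices.
struct ImportPath
{
    int count;
    int id[3];
};

ImportPath decodeImportId(uint32_t id)
{
    ImportPath path;
    path.count = int(id >> 30);
    path.id[0] = int(id >> 20) & 1023;
    path.id[1] = int(id >> 10) & 1023;
    path.id[2] = int(id) & 1023;
    return path;
}

int main()
{
    // Encode a 2-segment path ("a.b") with constant indices 5 and 9.
    uint32_t id = (2u << 30) | (5u << 20) | (9u << 10);

    ImportPath path = decodeImportId(id);
    assert(path.count == 2);
    std::printf("segments=%d first=%d second=%d\n", path.count, path.id[0], path.id[1]);
}
```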
@@ -114,7 +153,17 @@ static void resolveImportSafe(lua_State* L, Table* env, TValue* k, uint32_t id)
// note: we call getimport with nil propagation which means that accesses to table chains like A.B.C will resolve in nil
// this is technically not necessary but it reduces the number of exceptions when loading scripts that rely on getfenv/setfenv for global
// injection
luaV_getimport(L, L->gt, self->k, self->id, /* propagatenil= */ true);
if (FFlag::LuauGetImportDirect)
{
// allocate a stack slot so that we can do table lookups
luaD_checkstack(L, 1);
setnilvalue(L->top);
L->top++;

luaV_getimport(L, L->gt, self->k, L->top - 1, self->id, /* propagatenil= */ true);
}
else
luaV_getimport_dep(L, L->gt, self->k, self->id, /* propagatenil= */ true);
}
};

@@ -204,6 +253,8 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
for (int j = 0; j < p->sizecode; ++j)
p->code[j] = read<uint32_t>(data, size, offset);

p->codeentry = p->code;

p->sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, p->sizek, TValue, p->memcat);