// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#include "Fallbacks.h"
#include "FallbacksProlog.h"

const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: value is in expected slot
    Table* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;
    LuaNode* n = &h->node[slot];

    if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
    {
        setobj2s(L, ra, gval(n));
        return pc;
    }
    else
    {
        // slow-path, may invoke Lua calls via __index metamethod
        TValue g;
        sethvalue(L, &g, h);
        L->cachedslot = slot;
        VM_PROTECT(luaV_gettable(L, &g, kv, ra));
        // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
        VM_PATCH_C(pc - 2, L->cachedslot);
        return pc;
    }
}

const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: value is in expected slot
    Table* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;
    LuaNode* n = &h->node[slot];

    if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
    {
        setobj2t(L, gval(n), ra);
        luaC_barriert(L, h, ra);
        return pc;
    }
    else
    {
        // slow-path, may invoke Lua calls via __newindex metamethod
        TValue g;
        sethvalue(L, &g, h);
        L->cachedslot = slot;
        VM_PROTECT(luaV_settable(L, &g, kv, ra));
        // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
        VM_PATCH_C(pc - 2, L->cachedslot);
        return pc;
    }
}

const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        Table* h = hvalue(rb);

        int slot = LUAU_INSN_C(insn) & h->nodemask8;
        LuaNode* n = &h->node[slot];

        // fast-path: value is in expected slot
        if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
        {
            setobj2s(L, ra, gval(n));
            return pc;
        }
        else if (!h->metatable)
        {
            // fast-path: value is not in expected slot, but the table lookup doesn't involve metatable
            const TValue* res = luaH_getstr(h, tsvalue(kv));

            if (res != luaO_nilobject)
            {
                int cachedslot = gval2slot(h, res);
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, cachedslot);
            }

            setobj2s(L, ra, res);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __index metamethod
            L->cachedslot = slot;
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __index TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_INDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            L->top = top + 3;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else if (ttisvector(rb))
        {
            // fast-path: quick case-insensitive comparison with "X"/"Y"/"Z"
            const char* name = getstr(tsvalue(kv));
            int ic = (name[0] | ' ') - 'x';

#if LUA_VECTOR_SIZE == 4
            // 'w' is before 'x' in ascii, so ic is -1 when indexing with 'w'
            if (ic == -1)
                ic = 3;
#endif

            if (unsigned(ic) < LUA_VECTOR_SIZE && name[1] == '\0')
            {
                const float* v = rb->value.v; // silences ubsan when indexing v[]
                setnvalue(ra, v[ic]);
                return pc;
            }

            fn = fasttm(L, L->global->mt[LUA_TVECTOR], TM_INDEX);

            if (fn && ttisfunction(fn) && clvalue(fn)->isC)
            {
                // note: it's safe to push arguments past top for complicated reasons (see top of the file)
                LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
                StkId top = L->top;
                setobj2s(L, top + 0, fn);
                setobj2s(L, top + 1, rb);
                setobj2s(L, top + 2, kv);
                L->top = top + 3;

                L->cachedslot = LUAU_INSN_C(insn);
                VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                return pc;
            }

            // fall through to slow path
        }

        // fall through to slow path
    }

    // slow-path, may invoke Lua calls via __index metamethod
    VM_PROTECT(luaV_gettable(L, rb, kv, ra));
    return pc;
}

const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        Table* h = hvalue(rb);

        int slot = LUAU_INSN_C(insn) & h->nodemask8;
        LuaNode* n = &h->node[slot];

        // fast-path: value is in expected slot
        if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
        {
            setobj2t(L, gval(n), ra);
            luaC_barriert(L, h, ra);
            return pc;
        }
        else if (fastnotm(h->metatable, TM_NEWINDEX) && !h->readonly)
        {
            VM_PROTECT_PC(); // set may fail

            TValue* res = luaH_setstr(L, h, tsvalue(kv));
            int cachedslot = gval2slot(h, res);
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, cachedslot);
            setobj2t(L, res, ra);
            luaC_barriert(L, h, ra);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            L->cachedslot = slot;
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __newindex TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 4 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            setobj2s(L, top + 3, ra);
            L->top = top + 4;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 3, -1));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            return pc;
        }
    }
}

const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    Proto* pv = cl->l.p->p[LUAU_INSN_D(insn)];
    LUAU_ASSERT(unsigned(LUAU_INSN_D(insn)) < unsigned(cl->l.p->sizep));

    VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM

    // note: we save closure to stack early in case the code below wants to capture it by value
    Closure* ncl = luaF_newLclosure(L, pv->nups, cl->env, pv);
    setclvalue(L, ra, ncl);

    for (int ui = 0; ui < pv->nups; ++ui)
    {
        Instruction uinsn = *pc++;
        LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);

        switch (LUAU_INSN_A(uinsn))
        {
        case LCT_VAL:
            setobj(L, &ncl->l.uprefs[ui], VM_REG(LUAU_INSN_B(uinsn)));
            break;

        case LCT_REF:
            setupvalue(L, &ncl->l.uprefs[ui], luaF_findupval(L, VM_REG(LUAU_INSN_B(uinsn))));
            break;

        case LCT_UPVAL:
            setobj(L, &ncl->l.uprefs[ui], VM_UV(LUAU_INSN_B(uinsn)));
            break;

        default:
            LUAU_ASSERT(!"Unknown upvalue capture type");
            LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
        }
    }

    VM_PROTECT(luaC_checkGC(L));
    return pc;
}

const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    if (ttistable(rb))
    {
        Table* h = hvalue(rb);
        // note: we can't use nodemask8 here because we need to query the main position of the table, and 8-bit nodemask8 only works
        // for predictive lookups
        LuaNode* n = &h->node[tsvalue(kv)->hash & (sizenode(h) - 1)];

        const TValue* mt = 0;
        const LuaNode* mtn = 0;

        // fast-path: key is in the table in expected slot
        if (ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, gval(n));
        }
        // fast-path: key is absent from the base, table has an __index table, and it has the result in the expected slot
        else if (gnext(n) == 0 && (mt = fasttm(L, hvalue(rb)->metatable, TM_INDEX)) && ttistable(mt) &&
                 (mtn = &hvalue(mt)->node[LUAU_INSN_C(insn) & hvalue(mt)->nodemask8]) && ttisstring(gkey(mtn)) &&
                 tsvalue(gkey(mtn)) == tsvalue(kv) && !ttisnil(gval(mtn)))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, gval(mtn));
        }
        else
        {
            // slow-path: handles full table lookup
            setobj2s(L, ra + 1, rb);
            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));
            if (ttisnil(ra))
                luaG_methoderror(L, ra + 1, tsvalue(kv));
        }
    }
    else
    {
        Table* mt = ttisuserdata(rb) ? uvalue(rb)->metatable : L->global->mt[ttype(rb)];
        const TValue* tmi = 0;

        // fast-path: metatable with __namecall
        if (const TValue* fn = fasttm(L, mt, TM_NAMECALL))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, fn);

            L->namecall = tsvalue(kv);
        }
        else if ((tmi = fasttm(L, mt, TM_INDEX)) && ttistable(tmi))
        {
            Table* h = hvalue(tmi);
            int slot = LUAU_INSN_C(insn) & h->nodemask8;
            LuaNode* n = &h->node[slot];

            // fast-path: metatable with __index that has method in expected slot
            if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
            {
                // note: order of copies allows rb to alias ra+1 or ra
                setobj2s(L, ra + 1, rb);
                setobj2s(L, ra, gval(n));
            }
            else
            {
                // slow-path: handles slot mismatch
                setobj2s(L, ra + 1, rb);
                L->cachedslot = slot;
                VM_PROTECT(luaV_gettable(L, rb, kv, ra));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                // recompute ra since stack might have been reallocated
                ra = VM_REG(LUAU_INSN_A(insn));
                if (ttisnil(ra))
                    luaG_methoderror(L, ra + 1, tsvalue(kv));
            }
        }
        else
        {
            // slow-path: handles non-table __index
            setobj2s(L, ra + 1, rb);
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));
            if (ttisnil(ra))
                luaG_methoderror(L, ra + 1, tsvalue(kv));
        }
    }

    // intentional fallthrough to CALL
    LUAU_ASSERT(LUAU_INSN_OP(*pc) == LOP_CALL);
    return pc;
}

const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    if (ttisfunction(ra))
    {
        // will be called during FORGLOOP
    }
    else
    {
        Table* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? uvalue(ra)->metatable : cast_to(Table*, NULL);

        if (const TValue* fn = fasttm(L, mt, TM_ITER))
        {
            setobj2s(L, ra + 1, ra);
            setobj2s(L, ra, fn);

            L->top = ra + 2; // func + self arg
            LUAU_ASSERT(L->top <= L->stack_last);

            VM_PROTECT(luaD_call(L, ra, 3));
            L->top = L->ci->top;

            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));

            // protect against __iter returning nil, since nil is used as a marker for builtin iteration in FORGLOOP
            if (ttisnil(ra))
            {
                VM_PROTECT_PC(); // next call always errors
                luaG_typeerror(L, ra, "call");
            }
        }
        else if (fasttm(L, mt, TM_CALL))
        {
            // table or userdata with __call, will be called during FORGLOOP
            // TODO: we might be able to stop supporting this depending on whether it's used in practice
        }
        else if (ttistable(ra))
        {
            // set up registers for builtin iteration
            setobj2s(L, ra + 1, ra);
            setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)));
            setnilvalue(ra);
        }
        else
        {
            VM_PROTECT_PC(); // next call always errors
            luaG_typeerror(L, ra, "iterate over");
        }
    }

    pc += LUAU_INSN_D(insn);
    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
    return pc;
}

const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int b = LUAU_INSN_B(insn) - 1;
    int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;

    if (b == LUA_MULTRET)
    {
        VM_PROTECT(luaD_checkstack(L, n));
        StkId ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack

        for (int j = 0; j < n; j++)
            setobj2s(L, ra + j, base - n + j);

        L->top = ra + n;
        return pc;
    }
    else
    {
        StkId ra = VM_REG(LUAU_INSN_A(insn));

        for (int j = 0; j < b && j < n; j++)
            setobj2s(L, ra + j, base - n + j);
        for (int j = n; j < b; j++)
            setnilvalue(ra + j);
        return pc;
    }
}

const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    TValue* kv = VM_KV(LUAU_INSN_D(insn));

    Closure* kcl = clvalue(kv);

    VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM

    // clone closure if the environment is not shared
    // note: we save closure to stack early in case the code below wants to capture it by value
    Closure* ncl = (kcl->env == cl->env) ? kcl : luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
    setclvalue(L, ra, ncl);

    // this loop does three things:
    // - if the closure was created anew, it just fills it with upvalues
    // - if the closure from the constant table is used, it fills it with upvalues so that it can be shared in the future
    // - if the closure is reused, it checks if the reuse is safe via rawequal, and falls back to duplicating the closure
    // normally this would use two separate loops, for reuse check and upvalue setup, but MSVC codegen goes crazy if you do that
    for (int ui = 0; ui < kcl->nupvalues; ++ui)
    {
        Instruction uinsn = pc[ui];
        LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
        LUAU_ASSERT(LUAU_INSN_A(uinsn) == LCT_VAL || LUAU_INSN_A(uinsn) == LCT_UPVAL);

        TValue* uv = (LUAU_INSN_A(uinsn) == LCT_VAL) ? VM_REG(LUAU_INSN_B(uinsn)) : VM_UV(LUAU_INSN_B(uinsn));

        // check if the existing closure is safe to reuse
        if (ncl == kcl && luaO_rawequalObj(&ncl->l.uprefs[ui], uv))
            continue;

        // lazily clone the closure and update the upvalues
        if (ncl == kcl && kcl->preload == 0)
        {
            ncl = luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
            setclvalue(L, ra, ncl);

            ui = -1; // restart the loop to fill all upvalues
            continue;
        }

        // this updates a newly created closure, or an existing closure created during preload, in which case we need a barrier
        setobj(L, &ncl->l.uprefs[ui], uv);
        luaC_barrier(L, ncl, uv);
    }

    // this is a noop if ncl is newly created or shared successfully, but it has to run after the closure is preloaded for the first time
    ncl->preload = 0;

    if (kcl != ncl)
        VM_PROTECT(luaC_checkGC(L));

    pc += kcl->nupvalues;
    return pc;
}

const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int numparams = LUAU_INSN_A(insn);

    // all fixed parameters are copied after the top so we need more stack space
    VM_PROTECT(luaD_checkstack(L, cl->stacksize + numparams));

    // the caller must have filled extra fixed arguments with nil
    LUAU_ASSERT(cast_int(L->top - base) >= numparams);

    // move fixed parameters to final position
    StkId fixed = base; // first fixed argument
    base = L->top;      // final position of first argument

    for (int i = 0; i < numparams; ++i)
    {
        setobj2s(L, base + i, fixed + i);
        setnilvalue(fixed + i);
    }

    // rewire our stack frame to point to the new base
    L->ci->base = base;
    L->ci->top = base + cl->stacksize;
    L->base = base;
    L->top = L->ci->top;
    return pc;
}

const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int hits = LUAU_INSN_E(insn);

    // update hits with saturated add and patch the instruction in place
    hits = (hits < (1 << 23) - 1) ? hits + 1 : hits;
    VM_PATCH_E(pc - 1, hits);

    return pc;
}

const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    LUAU_ASSERT(!"Unsupported deprecated opcode");
    LUAU_UNREACHABLE();
}