diff --git a/.gitmodules b/.gitmodules index 91828e05a..e870c3190 100644 --- a/.gitmodules +++ b/.gitmodules @@ -3,3 +3,9 @@ url = https://github.com/tc39/test262 shallow = true update = none +[submodule "sljit"] + path = sljit + url = https://github.com/zherczeg/sljit +[submodule "quickjit"] + path = quickjit + url = https://github.com/bnoordhuis/quickjit diff --git a/CMakeLists.txt b/CMakeLists.txt index 76ab2a519..f63f3f8ee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -164,6 +164,7 @@ xoption(QJS_BUILD_CLI_STATIC "Build a static qjs executable" OFF) xoption(QJS_BUILD_CLI_WITH_MIMALLOC "Build the qjs executable with mimalloc" OFF) xoption(QJS_BUILD_CLI_WITH_STATIC_MIMALLOC "Build the qjs executable with mimalloc (statically linked)" OFF) xoption(QJS_DISABLE_PARSER "Disable JS source code parser" OFF) +xoption(QJS_ENABLE_JIT "Enable sljit JIT compiler" OFF) xoption(QJS_ENABLE_ASAN "Enable AddressSanitizer (ASan)" OFF) xoption(QJS_ENABLE_MSAN "Enable MemorySanitizer (MSan)" OFF) xoption(QJS_ENABLE_TSAN "Enable ThreadSanitizer (TSan)" OFF) @@ -255,6 +256,10 @@ set(qjs_sources if(QJS_BUILD_LIBC) list(APPEND qjs_sources quickjs-libc.c) endif() +if(QJS_ENABLE_JIT) + list(APPEND qjs_sources quickjs_sljit.c) + list(APPEND qjs_defines CONFIG_JIT) +endif() list(APPEND qjs_defines _GNU_SOURCE) if(WIN32) # NB: Windows 7 is EOL and we are only supporting in so far as it doesn't interfere with progress. diff --git a/quickjit b/quickjit new file mode 160000 index 000000000..d5a3e722a --- /dev/null +++ b/quickjit @@ -0,0 +1 @@ +Subproject commit d5a3e722aecb8412330282739b409bec1298eabe diff --git a/quickjs-jit.h b/quickjs-jit.h new file mode 100644 index 000000000..1014909f9 --- /dev/null +++ b/quickjs-jit.h @@ -0,0 +1,299 @@ +/* + * QuickJS JIT subsystem — shared header + * + * Defines the JitAux struct and declares functions shared between + * quickjs.c (interpreter / arithmetic helpers) and quickjs_sljit.c + * (code generation). 
+ * + * This header deliberately avoids including any sljit or internal + * quickjs.c headers so it can be consumed by both translation units + * without pulling in implementation details. + */ + +#ifndef QUICKJS_JIT_H +#define QUICKJS_JIT_H + +#include "quickjs.h" + +#ifdef CONFIG_JIT + +/* ----- forward declarations (opaque to this header) ----- */ + +struct JSVarRef; +struct JSStackFrame; + +/* ---- Monomorphic inline cache for property access ---- */ +typedef struct PropIC { + void *cached_shape; /* JSShape* — ref-counted (pinned) to prevent ABA */ + uint32_t cached_offset; /* 0-based index into JSObject.prop[] */ +} PropIC; + +/* ---- Struct layout info for inline IC code generation ---- */ +typedef struct JitICLayout { + int obj_shape_off; /* offsetof(JSObject, shape) */ + int obj_prop_off; /* offsetof(JSObject, prop) */ + int prop_size; /* sizeof(JSProperty) — stride for prop[] array */ +} JitICLayout; + +void jit_get_ic_layout(JitICLayout *out); + +/* ----- JitDispatchEntry: maps bytecode position → native address ----- */ + +/* + * Used by the JIT exception handler (to dispatch to catch handlers) + * and by OP_ret (to return from finally blocks to the correct gosub + * call site). Built during JIT compilation, native addresses filled + * in after sljit_generate_code(). + */ +typedef struct JitDispatchEntry { + int bc_pos; /* absolute bytecode position */ + void *native_addr; /* native code address (from sljit label) */ +} JitDispatchEntry; + +/* ----- JitAux: per-call context passed from interpreter to JIT ----- */ + +/* + * JitAux bundles every piece of interpreter state that compiled JIT + * code needs to read or write. The field order is fixed — generated + * code accesses members via hard-coded offsetof values. 
+ */ +typedef struct JitAux { + JSValue *stack_buf; /* operand stack base */ + JSValue *var_buf; /* local variable slots */ + JSValue *arg_buf; /* function argument slots */ + JSValue *sp; /* current stack pointer */ + struct JSVarRef **var_refs; /* closure variable references */ + struct JSStackFrame *sf; /* current stack frame */ + void *p; /* JSObject* — opaque to JIT code */ + JSContext *caller_ctx; /* calling context */ + JSValue ret_val; /* JIT stores its return value here */ + void *b; /* JSFunctionBytecode* — for cpool */ + JSValue this_obj; /* 'this' value for the call */ + JSValue new_target; /* new.target value */ + JSValue func_obj; /* the function being called */ + int argc; /* argument count */ + const JSValue *argv; /* argument values */ + JitDispatchEntry *dispatch_table; /* catch/return dispatch table */ + int dispatch_count; /* number of dispatch entries */ + PropIC *ic_cache; /* inline cache array for property access */ + int ic_count; /* number of IC entries */ + void *resume_native_addr; /* generator resume: native addr to jump to (NULL for initial call) */ + const uint8_t *resume_bc_pc; /* generator suspend: bytecode pc to resume at */ +} JitAux; + +/* ----- JitFunc: signature of every compiled JIT entry point ----- */ + +/* + * Returns 0 on normal return (result in aux->ret_val). + * Returns non-zero on exception. + */ +typedef int (*JitFunc)(JSContext *ctx, JitAux *aux); + +/* ----- code generation (quickjs_sljit.c) ----- */ + +/* + * Compile a bytecode function into native code via sljit. + * + * On success *out_jitcode receives the callable function pointer and + * *out_jit_code_ptr receives the opaque allocation that must later be + * freed with js_sljit_free(). + * + * *out_dispatch_table / *out_dispatch_count receive the dispatch table + * mapping bytecode positions to native addresses (for catch handlers + * and gosub return sites). Caller must free *out_dispatch_table. 
+ */ +void js_sljit_compile(JSContext *ctx, + uint8_t *byte_code_buf, int byte_code_len, + int arg_count, int var_count, int stack_size, + JitFunc *out_jitcode, void **out_jit_code_ptr, + JitDispatchEntry **out_dispatch_table, + int *out_dispatch_count, + PropIC **out_ic_cache, + int *out_ic_count); + +/* Free native code previously allocated by js_sljit_compile(). */ +void js_sljit_free(void *jit_code_ptr); + +/* ----- arithmetic helpers (quickjs.c) ----- */ + +/* + * These helpers perform the OP_add / OP_sub / OP_mul operations, + * including the slow paths that handle non-int32 operands. They are + * defined in quickjs.c because they need access to internal functions + * (js_add_slow, js_binary_arith_slow, etc.). + * + * Each function operates on sp[-2] and sp[-1], replaces sp[-2] with + * the result. The caller (sljit code) handles the sp decrement. + * + * Returns 0 on success, non-zero on exception. + */ +int qjs_jit_add(JSContext *ctx, JSValue *sp); +int qjs_jit_sub(JSContext *ctx, JSValue *sp); +int qjs_jit_mul(JSContext *ctx, JSValue *sp); + +/* + * JIT exception handler: unwind the JS stack looking for a catch + * offset, then look up the corresponding native code address in the + * dispatch table. + * + * Returns the native address to jump to (catch handler), or NULL + * if no handler was found (exception should propagate to caller). + * + * On success: aux->sp is updated, exception value is on the stack. + */ +void *jit_unwind_exception(JSContext *ctx, JitAux *aux); + +/* + * OP_ret helper: pop return address from JS stack, look up the + * native code address in the dispatch table. + * + * Returns the native address to jump to, or NULL on error. 
+ */ +void *qjs_jit_ret(JSContext *ctx, JitAux *aux); + +/* ----- per-opcode JIT helpers (quickjs.c) ----- */ +int jit_op_push_i32(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_push_i8(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_push_i16(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_push_small_int(JSContext *ctx, JitAux *aux, int opcode); +int jit_op_push_const(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_push_const8(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_push_bigint_i32(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_push_atom_value(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_push_literal(JSContext *ctx, JitAux *aux, int opcode); +int jit_op_object(JSContext *ctx, JitAux *aux); +int jit_op_push_this(JSContext *ctx, JitAux *aux); +int jit_op_special_object(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_rest(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_fclosure(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_fclosure8(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_drop(JSContext *ctx, JitAux *aux); +int jit_op_nip(JSContext *ctx, JitAux *aux); +int jit_op_dup(JSContext *ctx, JitAux *aux); +int jit_op_swap(JSContext *ctx, JitAux *aux); +int jit_op_get_loc(JSContext *ctx, JitAux *aux, int idx); +int jit_op_put_loc(JSContext *ctx, JitAux *aux, int idx); +int jit_op_set_loc(JSContext *ctx, JitAux *aux, int idx); +int jit_op_get_loc0_loc1(JSContext *ctx, JitAux *aux); +int jit_op_get_arg(JSContext *ctx, JitAux *aux, int idx); +int jit_op_put_arg(JSContext *ctx, JitAux *aux, int idx); +int jit_op_set_arg(JSContext *ctx, JitAux *aux, int idx); +int jit_op_get_var_ref(JSContext *ctx, JitAux *aux, int idx); +int jit_op_put_var_ref(JSContext *ctx, JitAux *aux, int idx); +int jit_op_set_var_ref(JSContext *ctx, JitAux *aux, int idx); +int jit_op_get_var_ref_check(JSContext *ctx, JitAux *aux, const uint8_t 
*pc); +int jit_op_put_var_ref_check(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_put_var_ref_check_init(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_set_loc_uninitialized(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_get_loc_check(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_put_loc_check(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_put_loc_check_init(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_close_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_get_var(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_put_var(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_check_define_var(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_define_var(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_define_func(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_make_ref(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_make_var_ref(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_call_n(JSContext *ctx, JitAux *aux, int argc); +int jit_op_call(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_call_constructor(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_call_method(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_array_from(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_apply(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_check_ctor_return(JSContext *ctx, JitAux *aux); +int jit_op_check_ctor(JSContext *ctx, JitAux *aux); +int jit_op_init_ctor(JSContext *ctx, JitAux *aux); +int jit_op_check_brand(JSContext *ctx, JitAux *aux); +int jit_op_add_brand(JSContext *ctx, JitAux *aux); +int jit_op_throw(JSContext *ctx, JitAux *aux); +int jit_op_throw_error(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_eval(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_apply_eval(JSContext *ctx, JitAux *aux, const uint8_t 
*pc); +int jit_op_regexp(JSContext *ctx, JitAux *aux); +int jit_op_get_super(JSContext *ctx, JitAux *aux); +int jit_op_import(JSContext *ctx, JitAux *aux); +int jit_op_catch(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_nip_catch(JSContext *ctx, JitAux *aux); +int jit_op_for_in_start(JSContext *ctx, JitAux *aux); +int jit_op_for_in_next(JSContext *ctx, JitAux *aux); +int jit_op_for_of_start(JSContext *ctx, JitAux *aux); +int jit_op_for_of_next(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_iterator_get_value_done(JSContext *ctx, JitAux *aux); +int jit_op_iterator_check_object(JSContext *ctx, JitAux *aux); +int jit_op_iterator_close(JSContext *ctx, JitAux *aux); +int jit_op_iterator_next(JSContext *ctx, JitAux *aux); +int jit_op_iterator_call(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_lnot(JSContext *ctx, JitAux *aux); +int jit_op_get_field(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_get_field2(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_put_field(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_get_field_ic(JSContext *ctx, JitAux *aux, const uint8_t *pc, PropIC *ic); +int jit_op_get_field2_ic(JSContext *ctx, JitAux *aux, const uint8_t *pc, PropIC *ic); +int jit_op_put_field_ic(JSContext *ctx, JitAux *aux, const uint8_t *pc, PropIC *ic); +int jit_op_put_field_ic_hit(JSContext *ctx, JitAux *aux, PropIC *ic); +int jit_op_get_length(JSContext *ctx, JitAux *aux); +int jit_op_private_symbol(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_get_private_field(JSContext *ctx, JitAux *aux); +int jit_op_put_private_field(JSContext *ctx, JitAux *aux); +int jit_op_define_private_field(JSContext *ctx, JitAux *aux); +int jit_op_get_array_el(JSContext *ctx, JitAux *aux); +int jit_op_get_array_el2(JSContext *ctx, JitAux *aux); +int jit_op_get_ref_value(JSContext *ctx, JitAux *aux); +int jit_op_get_super_value(JSContext *ctx, JitAux *aux); +int jit_op_put_array_el(JSContext *ctx, 
JitAux *aux); +int jit_op_put_ref_value(JSContext *ctx, JitAux *aux); +int jit_op_put_super_value(JSContext *ctx, JitAux *aux); +int jit_op_define_field(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_set_name(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_set_name_computed(JSContext *ctx, JitAux *aux); +int jit_op_set_proto(JSContext *ctx, JitAux *aux); +int jit_op_set_home_object(JSContext *ctx, JitAux *aux); +int jit_op_define_array_el(JSContext *ctx, JitAux *aux); +int jit_op_append(JSContext *ctx, JitAux *aux); +int jit_op_copy_data_properties(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_define_method(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_define_class(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_add(JSContext *ctx, JitAux *aux); +int jit_op_sub(JSContext *ctx, JitAux *aux); +int jit_op_mul(JSContext *ctx, JitAux *aux); +int jit_op_div(JSContext *ctx, JitAux *aux); +int jit_op_mod(JSContext *ctx, JitAux *aux); +int jit_op_pow(JSContext *ctx, JitAux *aux); +int jit_op_add_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_plus(JSContext *ctx, JitAux *aux); +int jit_op_neg(JSContext *ctx, JitAux *aux); +int jit_op_inc(JSContext *ctx, JitAux *aux); +int jit_op_dec(JSContext *ctx, JitAux *aux); +int jit_op_post_inc(JSContext *ctx, JitAux *aux); +int jit_op_post_dec(JSContext *ctx, JitAux *aux); +int jit_op_inc_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_dec_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_not(JSContext *ctx, JitAux *aux); +int jit_op_shl(JSContext *ctx, JitAux *aux); +int jit_op_shr(JSContext *ctx, JitAux *aux); +int jit_op_binary_logic(JSContext *ctx, JitAux *aux, int opcode); +int jit_op_relational(JSContext *ctx, JitAux *aux, int opcode); +int jit_op_eq(JSContext *ctx, JitAux *aux, int opcode); +int jit_op_strict_eq(JSContext *ctx, JitAux *aux, int opcode); +int jit_op_in(JSContext *ctx, JitAux *aux); +int 
jit_op_private_in(JSContext *ctx, JitAux *aux); +int jit_op_instanceof(JSContext *ctx, JitAux *aux); +int jit_op_typeof(JSContext *ctx, JitAux *aux); +int jit_op_delete(JSContext *ctx, JitAux *aux); +int jit_op_delete_var(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_to_object(JSContext *ctx, JitAux *aux); +int jit_op_to_propkey(JSContext *ctx, JitAux *aux); +int jit_op_to_propkey2(JSContext *ctx, JitAux *aux); +int jit_op_is_undefined_or_null(JSContext *ctx, JitAux *aux); +int jit_op_is_undefined(JSContext *ctx, JitAux *aux); +int jit_op_is_null(JSContext *ctx, JitAux *aux); +int jit_op_typeof_is_undefined(JSContext *ctx, JitAux *aux); +int jit_op_typeof_is_function(JSContext *ctx, JitAux *aux); +int jit_op_with(JSContext *ctx, JitAux *aux, const uint8_t *pc); +int jit_op_for_await_of_start(JSContext *ctx, JitAux *aux); + +#endif /* CONFIG_JIT */ + +#endif /* QUICKJS_JIT_H */ diff --git a/quickjs-libc.c b/quickjs-libc.c index f81a75a7c..4c254eb08 100644 --- a/quickjs-libc.c +++ b/quickjs-libc.c @@ -4494,6 +4494,31 @@ static JSValue js_print(JSContext *ctx, JSValueConst this_val, return JS_UNDEFINED; } +static JSValue js_jit_compiled(JSContext *ctx, JSValueConst this_val, + int argc, JSValueConst *argv) +{ + (void)this_val; + if (argc < 1) + return JS_FALSE; + return JS_NewBool(ctx, JS_IsJITCompiled(ctx, argv[0])); +} + +static JSValue js_jit_stats(JSContext *ctx, JSValueConst this_val, + int argc, JSValueConst *argv) +{ + uint32_t compiled, failed, calls; + JSValue obj; + (void)this_val; + (void)argc; + (void)argv; + JS_GetJITStats(JS_GetRuntime(ctx), &compiled, &failed, &calls); + obj = JS_NewObject(ctx); + JS_SetPropertyStr(ctx, obj, "compiled", JS_NewUint32(ctx, compiled)); + JS_SetPropertyStr(ctx, obj, "failed", JS_NewUint32(ctx, failed)); + JS_SetPropertyStr(ctx, obj, "calls", JS_NewUint32(ctx, calls)); + return obj; +} + void js_std_add_helpers(JSContext *ctx, int argc, char **argv) { JSValue global_obj, console, args; @@ -4519,6 +4544,15 @@ 
void js_std_add_helpers(JSContext *ctx, int argc, char **argv) JS_SetPropertyStr(ctx, global_obj, "print", JS_NewCFunction(ctx, js_print, "print", 1)); + { + JSValue jit_obj = JS_NewObject(ctx); + JS_SetPropertyStr(ctx, jit_obj, "compiled", + JS_NewCFunction(ctx, js_jit_compiled, "compiled", 1)); + JS_SetPropertyStr(ctx, jit_obj, "stats", + JS_NewCFunction(ctx, js_jit_stats, "stats", 0)); + JS_SetPropertyStr(ctx, global_obj, "__jit", jit_obj); + } + JS_FreeValue(ctx, global_obj); } diff --git a/quickjs.c b/quickjs.c index 546e188a0..904081ed8 100644 --- a/quickjs.c +++ b/quickjs.c @@ -47,6 +47,9 @@ #include "quickjs.h" #include "libregexp.h" #include "dtoa.h" +#ifdef CONFIG_JIT +#include "quickjs-jit.h" +#endif #if defined(EMSCRIPTEN) || defined(_MSC_VER) #define DIRECT_DISPATCH 0 @@ -346,6 +349,11 @@ struct JSRuntime { void *user_opaque; void *libc_opaque; JSRuntimeFinalizerState *finalizers; +#ifdef CONFIG_JIT + uint32_t jit_compile_count; /* functions successfully JIT compiled */ + uint32_t jit_compile_fail_count; /* functions that failed JIT compilation */ + uint32_t jit_call_count; /* total JIT entry invocations */ +#endif }; struct JSClass { @@ -791,6 +799,14 @@ typedef struct JSFunctionBytecode { int pc2line_len; uint8_t *pc2line_buf; char *source; +#ifdef CONFIG_JIT + void *jitcode; /* JitFunc - JIT compiled entry point */ + void *jit_code_ptr; /* opaque pointer for sljit_free_code() */ + JitDispatchEntry *jit_dispatch_table; /* catch/return dispatch table */ + int jit_dispatch_count; /* number of dispatch entries */ + PropIC *jit_ic_cache; /* inline cache for property access */ + int jit_ic_count; /* number of IC entries */ +#endif } JSFunctionBytecode; typedef struct JSBoundFunction { @@ -2231,6 +2247,14 @@ void JS_FreeRuntime(JSRuntime *rt) struct list_head *el, *el1; int i; +#ifdef CONFIG_JIT + if (getenv("QJS_JIT_STATS")) { + fprintf(stderr, "[JIT stats] compiled: %u, failed: %u, calls: %u\n", + rt->jit_compile_count, rt->jit_compile_fail_count, + 
rt->jit_call_count); + } +#endif + rt->in_free = true; JS_FreeValueRT(rt, rt->current_exception); @@ -6951,6 +6975,32 @@ bool JS_IsLiveObject(JSRuntime *rt, JSValueConst obj) return !p->free_mark; } +bool JS_IsJITCompiled(JSContext *ctx, JSValueConst val) +{ +#ifdef CONFIG_JIT + JSFunctionBytecode *b = JS_GetFunctionBytecode(val); + if (b && b->jitcode) + return true; +#endif + (void)ctx; + (void)val; + return false; +} + +void JS_GetJITStats(JSRuntime *rt, uint32_t *compiled, + uint32_t *failed, uint32_t *calls) +{ +#ifdef CONFIG_JIT + if (compiled) *compiled = rt->jit_compile_count; + if (failed) *failed = rt->jit_compile_fail_count; + if (calls) *calls = rt->jit_call_count; +#else + if (compiled) *compiled = 0; + if (failed) *failed = 0; + if (calls) *calls = 0; +#endif +} + /* Compute memory used by various object types */ /* XXX: poor man's approach to handling multiply referenced objects */ typedef struct JSMemoryUsage_helper { @@ -14983,6 +15033,96 @@ static no_inline __exception int js_add_slow(JSContext *ctx, JSValue *sp) return -1; } +#ifdef CONFIG_JIT +/* + * JIT arithmetic helpers. Called from sljit-compiled code via icall. + * Operate on sp[-2] and sp[-1], store result in sp[-2]. + * Return 0 on success, -1 on exception. + * The caller (sljit code) handles sp decrement. 
+ */ +int qjs_jit_add(JSContext *ctx, JSValue *sp) +{ + JSValue op1 = sp[-2]; + JSValue op2 = sp[-1]; + + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int64_t r; + r = (int64_t)JS_VALUE_GET_INT(op1) + JS_VALUE_GET_INT(op2); + if (unlikely(r < INT32_MIN || r > INT32_MAX)) + sp[-2] = js_float64(r); + else + sp[-2] = js_int32(r); + return 0; + } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { + sp[-2] = js_float64(JS_VALUE_GET_FLOAT64(op1) + + JS_VALUE_GET_FLOAT64(op2)); + return 0; + } + if (js_add_slow(ctx, sp)) + return -1; + return 0; +} + +int qjs_jit_sub(JSContext *ctx, JSValue *sp) +{ + JSValue op1 = sp[-2]; + JSValue op2 = sp[-1]; + + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int64_t r; + r = (int64_t)JS_VALUE_GET_INT(op1) - JS_VALUE_GET_INT(op2); + if (unlikely((int)r != r)) + sp[-2] = __JS_NewFloat64((double)r); + else + sp[-2] = js_int32(r); + return 0; + } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { + sp[-2] = js_float64(JS_VALUE_GET_FLOAT64(op1) - + JS_VALUE_GET_FLOAT64(op2)); + return 0; + } + if (js_binary_arith_slow(ctx, sp, OP_sub)) + return -1; + return 0; +} + +int qjs_jit_mul(JSContext *ctx, JSValue *sp) +{ + JSValue op1 = sp[-2]; + JSValue op2 = sp[-1]; + + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int32_t v1, v2; + int64_t r; + double d; + v1 = JS_VALUE_GET_INT(op1); + v2 = JS_VALUE_GET_INT(op2); + r = (int64_t)v1 * v2; + if (unlikely((int)r != r)) { + d = (double)r; + goto mul_fp_res; + } + /* need to test zero case for -0 result */ + if (unlikely(r == 0 && (v1 | v2) < 0)) { + d = -0.0; + goto mul_fp_res; + } + sp[-2] = js_int32(r); + return 0; + mul_fp_res: + sp[-2] = js_float64(d); + return 0; + } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { + sp[-2] = js_float64(JS_VALUE_GET_FLOAT64(op1) * + JS_VALUE_GET_FLOAT64(op2)); + return 0; + } + if (js_binary_arith_slow(ctx, sp, OP_mul)) + return -1; + return 0; +} +#endif /* CONFIG_JIT */ + static no_inline __exception int js_binary_logic_slow(JSContext *ctx, JSValue *sp, OPCodeEnum op) 
@@ -16897,330 +17037,3436 @@ static JSValue js_closure(JSContext *ctx, JSValue bfunc, JS_AUTOINIT_ID_PROTOTYPE, NULL, JS_PROP_WRITABLE); } - return func_obj; - fail: - /* bfunc is freed when func_obj is freed */ - JS_FreeValue(ctx, func_obj); - return JS_EXCEPTION; + return func_obj; + fail: + /* bfunc is freed when func_obj is freed */ + JS_FreeValue(ctx, func_obj); + return JS_EXCEPTION; +} + +#define JS_DEFINE_CLASS_HAS_HERITAGE (1 << 0) + +static int js_op_define_class(JSContext *ctx, JSValue *sp, + JSAtom class_name, int class_flags, + JSVarRef **cur_var_refs, + JSStackFrame *sf, bool is_computed_name) +{ + JSValue bfunc, parent_class, proto = JS_UNDEFINED; + JSValue ctor = JS_UNDEFINED, parent_proto = JS_UNDEFINED; + JSFunctionBytecode *b; + + parent_class = sp[-2]; + bfunc = sp[-1]; + + if (class_flags & JS_DEFINE_CLASS_HAS_HERITAGE) { + if (JS_IsNull(parent_class)) { + parent_proto = JS_NULL; + parent_class = js_dup(ctx->function_proto); + } else { + if (!JS_IsConstructor(ctx, parent_class)) { + JS_ThrowTypeError(ctx, "parent class must be constructor"); + goto fail; + } + parent_proto = JS_GetProperty(ctx, parent_class, JS_ATOM_prototype); + if (JS_IsException(parent_proto)) + goto fail; + if (!JS_IsNull(parent_proto) && !JS_IsObject(parent_proto)) { + JS_ThrowTypeError(ctx, "parent prototype must be an object or null"); + goto fail; + } + } + } else { + /* parent_class is JS_UNDEFINED in this case */ + parent_proto = js_dup(ctx->class_proto[JS_CLASS_OBJECT]); + parent_class = js_dup(ctx->function_proto); + } + proto = JS_NewObjectProto(ctx, parent_proto); + if (JS_IsException(proto)) + goto fail; + + b = JS_VALUE_GET_PTR(bfunc); + assert(b->func_kind == JS_FUNC_NORMAL); + ctor = JS_NewObjectProtoClass(ctx, parent_class, + JS_CLASS_BYTECODE_FUNCTION); + if (JS_IsException(ctor)) + goto fail; + ctor = js_closure2(ctx, ctor, b, cur_var_refs, sf); + bfunc = JS_UNDEFINED; + if (JS_IsException(ctor)) + goto fail; + js_method_set_home_object(ctx, ctor, proto); + 
JS_SetConstructorBit(ctx, ctor, true); + + JS_DefinePropertyValue(ctx, ctor, JS_ATOM_length, + js_int32(b->defined_arg_count), + JS_PROP_CONFIGURABLE); + + if (is_computed_name) { + if (JS_DefineObjectNameComputed(ctx, ctor, sp[-3], + JS_PROP_CONFIGURABLE) < 0) + goto fail; + } else { + if (JS_DefineObjectName(ctx, ctor, class_name, JS_PROP_CONFIGURABLE) < 0) + goto fail; + } + + /* the constructor property must be first. It can be overriden by + computed property names */ + if (JS_DefinePropertyValue(ctx, proto, JS_ATOM_constructor, + js_dup(ctor), + JS_PROP_CONFIGURABLE | + JS_PROP_WRITABLE | JS_PROP_THROW) < 0) + goto fail; + /* set the prototype property */ + if (JS_DefinePropertyValue(ctx, ctor, JS_ATOM_prototype, + js_dup(proto), JS_PROP_THROW) < 0) + goto fail; + + JS_FreeValue(ctx, parent_proto); + JS_FreeValue(ctx, parent_class); + + sp[-2] = ctor; + sp[-1] = proto; + return 0; + fail: + JS_FreeValue(ctx, parent_class); + JS_FreeValue(ctx, parent_proto); + JS_FreeValue(ctx, bfunc); + JS_FreeValue(ctx, proto); + JS_FreeValue(ctx, ctor); + sp[-2] = JS_UNDEFINED; + sp[-1] = JS_UNDEFINED; + return -1; +} + +static void close_var_ref(JSRuntime *rt, JSVarRef *var_ref) +{ + var_ref->value = js_dup(*var_ref->pvalue); + var_ref->pvalue = &var_ref->value; + /* the reference is no longer to a local variable */ + var_ref->is_detached = true; + add_gc_object(rt, &var_ref->header, JS_GC_OBJ_TYPE_VAR_REF); +} + +static void close_var_refs(JSRuntime *rt, JSStackFrame *sf) +{ + JSVarRef *var_ref; + int i; + + for (i = 0; i < sf->var_ref_count; i++) { + var_ref = sf->var_refs[i]; + if (var_ref) + close_var_ref(rt, var_ref); + } +} + +static void close_lexical_var(JSContext *ctx, JSFunctionBytecode *b, + JSStackFrame *sf, int var_idx) +{ + JSVarRef *var_ref; + int var_ref_idx; + + var_ref_idx = b->vardefs[b->arg_count + var_idx].var_ref_idx; + var_ref = sf->var_refs[var_ref_idx]; + if (var_ref) { + close_var_ref(ctx->rt, var_ref); + sf->var_refs[var_ref_idx] = NULL; + } +} + 
+#define JS_CALL_FLAG_COPY_ARGV (1 << 1) +#define JS_CALL_FLAG_GENERATOR (1 << 2) + +static JSValue js_call_c_function(JSContext *ctx, JSValueConst func_obj, + JSValueConst this_obj, + int argc, JSValueConst *argv, int flags) +{ + JSRuntime *rt = ctx->rt; + JSCFunctionType func; + JSObject *p; + JSStackFrame sf_s, *sf = &sf_s, *prev_sf; + JSValue ret_val; + JSValueConst *arg_buf; + int arg_count, i; + JSCFunctionEnum cproto; + + p = JS_VALUE_GET_OBJ(func_obj); + cproto = p->u.cfunc.cproto; + arg_count = p->u.cfunc.length; + + /* better to always check stack overflow */ + if (js_check_stack_overflow(rt, sizeof(arg_buf[0]) * arg_count)) + return JS_ThrowStackOverflow(ctx); + + prev_sf = rt->current_stack_frame; + sf->prev_frame = prev_sf; + rt->current_stack_frame = sf; + ctx = p->u.cfunc.realm; /* change the current realm */ + + sf->is_strict_mode = false; + sf->cur_func = unsafe_unconst(func_obj); + sf->arg_count = argc; + arg_buf = argv; + + if (unlikely(argc < arg_count)) { + /* ensure that at least argc_count arguments are readable */ + arg_buf = alloca(sizeof(arg_buf[0]) * arg_count); + for(i = 0; i < argc; i++) + arg_buf[i] = argv[i]; + for(i = argc; i < arg_count; i++) + arg_buf[i] = JS_UNDEFINED; + sf->arg_count = arg_count; + } + sf->arg_buf = (JSValue *)arg_buf; + + func = p->u.cfunc.c_function; + switch(cproto) { + case JS_CFUNC_constructor: + case JS_CFUNC_constructor_or_func: + if (!(flags & JS_CALL_FLAG_CONSTRUCTOR)) { + if (cproto == JS_CFUNC_constructor) { + not_a_constructor: + ret_val = JS_ThrowTypeError(ctx, "must be called with new"); + break; + } else { + this_obj = JS_UNDEFINED; + } + } + /* here this_obj is new_target */ + /* fall thru */ + case JS_CFUNC_generic: + ret_val = func.generic(ctx, this_obj, argc, arg_buf); + break; + case JS_CFUNC_constructor_magic: + case JS_CFUNC_constructor_or_func_magic: + if (!(flags & JS_CALL_FLAG_CONSTRUCTOR)) { + if (cproto == JS_CFUNC_constructor_magic) { + goto not_a_constructor; + } else { + this_obj = 
JS_UNDEFINED; + } + } + /* fall thru */ + case JS_CFUNC_generic_magic: + ret_val = func.generic_magic(ctx, this_obj, argc, arg_buf, + p->u.cfunc.magic); + break; + case JS_CFUNC_getter: + ret_val = func.getter(ctx, this_obj); + break; + case JS_CFUNC_setter: + ret_val = func.setter(ctx, this_obj, arg_buf[0]); + break; + case JS_CFUNC_getter_magic: + ret_val = func.getter_magic(ctx, this_obj, p->u.cfunc.magic); + break; + case JS_CFUNC_setter_magic: + ret_val = func.setter_magic(ctx, this_obj, arg_buf[0], p->u.cfunc.magic); + break; + case JS_CFUNC_f_f: + { + double d1; + + if (unlikely(JS_ToFloat64(ctx, &d1, arg_buf[0]))) { + ret_val = JS_EXCEPTION; + break; + } + ret_val = js_number(func.f_f(d1)); + } + break; + case JS_CFUNC_f_f_f: + { + double d1, d2; + + if (unlikely(JS_ToFloat64(ctx, &d1, arg_buf[0]))) { + ret_val = JS_EXCEPTION; + break; + } + if (unlikely(JS_ToFloat64(ctx, &d2, arg_buf[1]))) { + ret_val = JS_EXCEPTION; + break; + } + ret_val = js_number(func.f_f_f(d1, d2)); + } + break; + case JS_CFUNC_iterator_next: + { + int done; + ret_val = func.iterator_next(ctx, this_obj, argc, arg_buf, + &done, p->u.cfunc.magic); + if (!JS_IsException(ret_val) && done != 2) { + ret_val = js_create_iterator_result(ctx, ret_val, done); + } + } + break; + default: + abort(); + } + + rt->current_stack_frame = sf->prev_frame; + return ret_val; +} + +static JSValue js_call_bound_function(JSContext *ctx, JSValueConst func_obj, + JSValueConst this_obj, + int argc, JSValueConst *argv, int flags) +{ + JSObject *p; + JSBoundFunction *bf; + JSValueConst *arg_buf, new_target; + int arg_count, i; + + p = JS_VALUE_GET_OBJ(func_obj); + bf = p->u.bound_function; + arg_count = bf->argc + argc; + if (js_check_stack_overflow(ctx->rt, sizeof(JSValue) * arg_count)) + return JS_ThrowStackOverflow(ctx); + arg_buf = alloca(sizeof(JSValue) * arg_count); + for(i = 0; i < bf->argc; i++) { + arg_buf[i] = bf->argv[i]; + } + for(i = 0; i < argc; i++) { + arg_buf[bf->argc + i] = argv[i]; + } + if 
(flags & JS_CALL_FLAG_CONSTRUCTOR) { + new_target = this_obj; + if (js_same_value(ctx, func_obj, new_target)) + new_target = bf->func_obj; + return JS_CallConstructor2(ctx, bf->func_obj, new_target, + arg_count, arg_buf); + } else { + return JS_Call(ctx, bf->func_obj, bf->this_val, + arg_count, arg_buf); + } +} + +/* argument of OP_special_object */ +typedef enum { + OP_SPECIAL_OBJECT_ARGUMENTS, + OP_SPECIAL_OBJECT_MAPPED_ARGUMENTS, + OP_SPECIAL_OBJECT_THIS_FUNC, + OP_SPECIAL_OBJECT_NEW_TARGET, + OP_SPECIAL_OBJECT_HOME_OBJECT, + OP_SPECIAL_OBJECT_VAR_OBJECT, + OP_SPECIAL_OBJECT_IMPORT_META, + OP_SPECIAL_OBJECT_NULL_PROTO, +} OPSpecialObjectEnum; + +#define FUNC_RET_AWAIT 0 +#define FUNC_RET_YIELD 1 +#define FUNC_RET_YIELD_STAR 2 + +#ifdef ENABLE_DUMPS // JS_DUMP_BYTECODE_* +static void dump_single_byte_code(JSContext *ctx, const uint8_t *pc, + JSFunctionBytecode *b, int start_pos); +static void print_func_name(JSFunctionBytecode *b); +#endif + +#ifdef CONFIG_JIT + +/* ---- per-opcode JIT helper functions ---- */ +/* Each helper extracts one case (or group of related cases) from the + * former qjs_jit_exec() mega-switch so that JIT-compiled code can + * call them directly via sljit_emit_icall. + * + * Signature conventions: + * int jit_op_XXX(JSContext *ctx, JitAux *aux, const uint8_t *pc) + * — for opcodes that read bytecode operands from pc+1 + * int jit_op_XXX(JSContext *ctx, JitAux *aux) + * — for opcodes with no bytecode operands + * int jit_op_XXX(JSContext *ctx, JitAux *aux, int param) + * — for grouped opcodes needing an extra discriminator + * + * Return: 0 = success, -1 = exception. + * Exception: jit_op_with returns 0/1/-1 (not-found/found/exception). 
+ */ + +/* ---- push integer constants ---- */ + +int jit_op_push_i32(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + (void)ctx; + *sp++ = js_int32(get_u32(pc_arg)); + aux->sp = sp; + return 0; +} + +int jit_op_push_i8(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + (void)ctx; + *sp++ = js_int32(get_i8(pc_arg)); + aux->sp = sp; + return 0; +} + +int jit_op_push_i16(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + (void)ctx; + *sp++ = js_int32(get_i16(pc_arg)); + aux->sp = sp; + return 0; +} + +int jit_op_push_small_int(JSContext *ctx, JitAux *aux, int opcode) +{ + JSValue *sp = aux->sp; + (void)ctx; + *sp++ = js_int32(opcode - OP_push_0); + aux->sp = sp; + return 0; +} + +/* ---- push constant pool / atom values ---- */ + +int jit_op_push_const(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + (void)ctx; + *sp++ = js_dup(b->cpool[get_u32(pc_arg)]); + aux->sp = sp; + return 0; +} + +int jit_op_push_const8(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + (void)ctx; + *sp++ = js_dup(b->cpool[pc_arg[0]]); + aux->sp = sp; + return 0; +} + +int jit_op_push_bigint_i32(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + *sp++ = __JS_NewShortBigInt(ctx, (int)get_u32(pc_arg)); + aux->sp = sp; + return 0; +} + +int jit_op_push_atom_value(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + *sp++ = JS_AtomToValue(ctx, get_u32(pc_arg)); + aux->sp = sp; + return 0; +} + +/* ---- push literals ---- */ + +int jit_op_push_literal(JSContext *ctx, 
JitAux *aux, int opcode) +{ + JSValue *sp = aux->sp; + JSRuntime *rt = ctx->rt; + switch (opcode) { + case OP_undefined: *sp++ = JS_UNDEFINED; break; + case OP_null: *sp++ = JS_NULL; break; + case OP_push_false: *sp++ = JS_FALSE; break; + case OP_push_true: *sp++ = JS_TRUE; break; + case OP_push_empty_string: *sp++ = js_empty_string(rt); break; + default: abort(); + } + aux->sp = sp; + return 0; +} + +/* ---- push object / this ---- */ + +int jit_op_object(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + *sp++ = JS_NewObject(ctx); + if (unlikely(JS_IsException(sp[-1]))) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_push_this(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSValue this_obj = aux->this_obj; + JSValue val; + if (!b->is_strict_mode) { + uint32_t tag = JS_VALUE_GET_TAG(this_obj); + if (likely(tag == JS_TAG_OBJECT)) { + val = js_dup(this_obj); + } else if (tag == JS_TAG_NULL || tag == JS_TAG_UNDEFINED) { + val = js_dup(ctx->global_obj); + } else { + val = JS_ToObject(ctx, this_obj); + if (JS_IsException(val)) { + aux->sp = sp; + return -1; + } + } + } else { + val = js_dup(this_obj); + } + *sp++ = val; + aux->sp = sp; + return 0; +} + +int jit_op_special_object(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + JSObject *p = (JSObject *)aux->p; + int arg = pc_arg[0]; + switch(arg) { + case OP_SPECIAL_OBJECT_ARGUMENTS: + *sp++ = js_build_arguments(ctx, aux->argc, + (JSValueConst *)aux->argv); + if (unlikely(JS_IsException(sp[-1]))) + goto jit_exception; + break; + case OP_SPECIAL_OBJECT_MAPPED_ARGUMENTS: + *sp++ = js_build_mapped_arguments(ctx, aux->argc, + (JSValueConst *)aux->argv, + sf, + min_int(aux->argc, + b->arg_count)); + if (unlikely(JS_IsException(sp[-1]))) + goto 
jit_exception; + break; + case OP_SPECIAL_OBJECT_THIS_FUNC: + *sp++ = js_dup(sf->cur_func); + break; + case OP_SPECIAL_OBJECT_NEW_TARGET: + *sp++ = js_dup(aux->new_target); + break; + case OP_SPECIAL_OBJECT_HOME_OBJECT: + { + JSObject *p1; + p1 = p->u.func.home_object; + if (unlikely(!p1)) + *sp++ = JS_UNDEFINED; + else + *sp++ = js_dup(JS_MKPTR(JS_TAG_OBJECT, p1)); + } + break; + case OP_SPECIAL_OBJECT_VAR_OBJECT: + *sp++ = JS_NewObjectProto(ctx, JS_NULL); + if (unlikely(JS_IsException(sp[-1]))) + goto jit_exception; + break; + case OP_SPECIAL_OBJECT_IMPORT_META: + *sp++ = js_import_meta(ctx); + if (unlikely(JS_IsException(sp[-1]))) + goto jit_exception; + break; + case OP_SPECIAL_OBJECT_NULL_PROTO: + *sp++ = JS_NewObjectProtoClass(ctx, JS_NULL, + JS_CLASS_OBJECT); + if (unlikely(JS_IsException(sp[-1]))) + goto jit_exception; + break; + default: + abort(); + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_rest(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int first = get_u16(pc_arg); + int ri, n; + ri = min_int(first, aux->argc); + n = aux->argc - ri; + *sp++ = js_create_array(ctx, n, + (JSValueConst *)aux->argv + ri); + if (unlikely(JS_IsException(sp[-1]))) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +/* ---- closures ---- */ + +int jit_op_fclosure(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + JSValue bfunc = js_dup(b->cpool[get_u32(pc_arg)]); + *sp++ = js_closure(ctx, bfunc, var_refs, sf); + if (unlikely(JS_IsException(sp[-1]))) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_fclosure8(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg 
= pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + *sp++ = js_closure(ctx, js_dup(b->cpool[pc_arg[0]]), var_refs, sf); + if (unlikely(JS_IsException(sp[-1]))) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +/* ---- stack manipulation ---- */ + +int jit_op_drop(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JS_FreeValue(ctx, sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_nip(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JS_FreeValue(ctx, sp[-2]); + sp[-2] = sp[-1]; + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_dup(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + (void)ctx; + sp[0] = js_dup(sp[-1]); + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_swap(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-2]; + sp[-2] = sp[-1]; + sp[-1] = tmp; + aux->sp = sp; + return 0; +} + +/* ---- local variable access ---- */ + +int jit_op_get_loc(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + (void)ctx; + sp[0] = js_dup(aux->var_buf[idx]); + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_put_loc(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + set_value(ctx, &aux->var_buf[idx], sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_set_loc(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + set_value(ctx, &aux->var_buf[idx], js_dup(sp[-1])); + aux->sp = sp; + return 0; +} + +int jit_op_get_loc0_loc1(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + (void)ctx; + *sp++ = js_dup(aux->var_buf[0]); + *sp++ = js_dup(aux->var_buf[1]); + aux->sp = sp; + return 0; +} + +/* ---- argument access ---- */ + +int jit_op_get_arg(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + (void)ctx; + sp[0] = js_dup(aux->arg_buf[idx]); + sp++; + aux->sp = sp; + return 0; +} + 
+int jit_op_put_arg(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + set_value(ctx, &aux->arg_buf[idx], sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_set_arg(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + set_value(ctx, &aux->arg_buf[idx], js_dup(sp[-1])); + aux->sp = sp; + return 0; +} + +/* ---- var ref access ---- */ + +int jit_op_get_var_ref(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + (void)ctx; + sp[0] = js_dup(*var_refs[idx]->pvalue); + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_put_var_ref(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + set_value(ctx, var_refs[idx]->pvalue, sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_set_var_ref(JSContext *ctx, JitAux *aux, int idx) +{ + JSValue *sp = aux->sp; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + set_value(ctx, var_refs[idx]->pvalue, js_dup(sp[-1])); + aux->sp = sp; + return 0; +} + +/* ---- checked var ref access ---- */ + +int jit_op_get_var_ref_check(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + int idx; + JSValue val; + idx = get_u16(pc_arg); + val = *var_refs[idx]->pvalue; + if (unlikely(JS_IsUninitialized(val))) { + JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, true); + aux->sp = sp; + return -1; + } + sp[0] = js_dup(val); + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_put_var_ref_check(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + int idx; + idx = get_u16(pc_arg); + if (unlikely(JS_IsUninitialized(*var_refs[idx]->pvalue))) { 
+ JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, true); + aux->sp = sp; + return -1; + } + set_value(ctx, var_refs[idx]->pvalue, sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_put_var_ref_check_init(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + int idx; + idx = get_u16(pc_arg); + if (unlikely(!JS_IsUninitialized(*var_refs[idx]->pvalue))) { + JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, true); + aux->sp = sp; + return -1; + } + set_value(ctx, var_refs[idx]->pvalue, sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +/* ---- checked local access ---- */ + +int jit_op_set_loc_uninitialized(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + const uint8_t *pc_arg = pc + 1; + int idx; + idx = get_u16(pc_arg); + set_value(ctx, &aux->var_buf[idx], JS_UNINITIALIZED); + return 0; +} + +int jit_op_get_loc_check(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSContext *caller_ctx = aux->caller_ctx; + JSValue *var_buf = aux->var_buf; + int idx; + idx = get_u16(pc_arg); + if (unlikely(JS_IsUninitialized(var_buf[idx]))) { + JS_ThrowReferenceErrorUninitialized2(caller_ctx, b, idx, false); + aux->sp = sp; + return -1; + } + sp[0] = js_dup(var_buf[idx]); + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_put_loc_check(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSContext *caller_ctx = aux->caller_ctx; + JSValue *var_buf = aux->var_buf; + int idx; + (void)b; + idx = get_u16(pc_arg); + if (unlikely(JS_IsUninitialized(var_buf[idx]))) { + JS_ThrowReferenceErrorUninitialized2(caller_ctx, b, idx, false); + aux->sp = sp; + return -1; + } + 
set_value(ctx, &var_buf[idx], sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_put_loc_check_init(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSContext *caller_ctx = aux->caller_ctx; + JSValue *var_buf = aux->var_buf; + int idx; + idx = get_u16(pc_arg); + if (unlikely(!JS_IsUninitialized(var_buf[idx]))) { + JS_ThrowReferenceError(caller_ctx, + "'this' can be initialized only once"); + aux->sp = sp; + return -1; + } + set_value(ctx, &var_buf[idx], sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_close_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + int idx; + idx = get_u16(pc_arg); + close_lexical_var(ctx, b, sf, idx); + return 0; +} + +/* ---- global variable access ---- */ + +int jit_op_get_var(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + OPCodeEnum opcode = (OPCodeEnum)*pc; + JSValue val; + JSAtom atom; + atom = get_u32(pc_arg); + val = JS_GetGlobalVar(ctx, atom, opcode - OP_get_var_undef); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + *sp++ = val; + aux->sp = sp; + return 0; +} + +int jit_op_put_var(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + OPCodeEnum opcode = (OPCodeEnum)*pc; + int ret; + JSAtom atom; + atom = get_u32(pc_arg); + ret = JS_SetGlobalVar(ctx, atom, sp[-1], opcode - OP_put_var); + sp--; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_check_define_var(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + const uint8_t *pc_arg = pc + 1; + JSAtom atom; + int flags; + (void)aux; + atom = get_u32(pc_arg); + flags = pc_arg[4]; + if (JS_CheckDefineGlobalVar(ctx, atom, flags)) + return -1; + return 0; +} + 
+int jit_op_define_var(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + const uint8_t *pc_arg = pc + 1; + JSAtom atom; + int flags; + (void)aux; + atom = get_u32(pc_arg); + flags = pc_arg[4]; + if (JS_DefineGlobalVar(ctx, atom, flags)) + return -1; + return 0; +} + +int jit_op_define_func(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSAtom atom; + int flags; + atom = get_u32(pc_arg); + flags = pc_arg[4]; + if (JS_DefineGlobalFunction(ctx, atom, sp[-1], flags)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +/* ---- make refs ---- */ + +int jit_op_make_ref(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + OPCodeEnum opcode = (OPCodeEnum)*pc; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + JSRuntime *rt = ctx->rt; + JSVarRef *var_ref; + JSProperty *pr; + JSAtom atom; + int idx; + (void)b; + atom = get_u32(pc_arg); + idx = get_u16(pc_arg + 4); + *sp++ = JS_NewObjectProto(ctx, JS_NULL); + if (unlikely(JS_IsException(sp[-1]))) + goto jit_exception; + if (opcode == OP_make_var_ref_ref) { + var_ref = var_refs[idx]; + var_ref->header.ref_count++; + } else { + var_ref = get_var_ref(ctx, sf, idx, + opcode == OP_make_arg_ref); + if (!var_ref) + goto jit_exception; + } + pr = add_property(ctx, JS_VALUE_GET_OBJ(sp[-1]), atom, + JS_PROP_WRITABLE | JS_PROP_VARREF); + if (!pr) { + free_var_ref(rt, var_ref); + goto jit_exception; + } + pr->u.var_ref = var_ref; + *sp++ = JS_AtomToValue(ctx, atom); + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_make_var_ref(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSAtom atom; + atom = get_u32(pc_arg); + if (JS_GetGlobalVarRef(ctx, atom, sp)) { 
+ aux->sp = sp; + return -1; + } + sp += 2; + aux->sp = sp; + return 0; +} + +/* ---- call helpers ---- */ + +int jit_op_call_n(JSContext *ctx, JitAux *aux, int argc) +{ + JSValue *sp = aux->sp; + JSValue *call_argv = sp - argc; + JSValue ret_val; + int i; + ret_val = JS_CallInternal(ctx, call_argv[-1], JS_UNDEFINED, + JS_UNDEFINED, argc, + vc(call_argv), 0); + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + for (i = -1; i < argc; i++) + JS_FreeValue(ctx, call_argv[i]); + sp -= argc + 1; + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +int jit_op_call(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int call_argc; + JSValue *call_argv; + JSValue ret_val; + int i; + call_argc = get_u16(pc_arg); + call_argv = sp - call_argc; + ret_val = JS_CallInternal(ctx, call_argv[-1], JS_UNDEFINED, + JS_UNDEFINED, call_argc, + vc(call_argv), 0); + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + for (i = -1; i < call_argc; i++) + JS_FreeValue(ctx, call_argv[i]); + sp -= call_argc + 1; + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +int jit_op_call_constructor(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int call_argc; + JSValue *call_argv; + JSValue ret_val; + int i; + call_argc = get_u16(pc_arg); + call_argv = sp - call_argc; + ret_val = JS_CallConstructorInternal(ctx, call_argv[-2], + call_argv[-1], call_argc, + vc(call_argv), 0); + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + for (i = -2; i < call_argc; i++) + JS_FreeValue(ctx, call_argv[i]); + sp -= call_argc + 2; + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +int jit_op_call_method(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int call_argc; + JSValue *call_argv; + JSValue ret_val; + int i; + call_argc = get_u16(pc_arg); + call_argv 
= sp - call_argc; + ret_val = JS_CallInternal(ctx, call_argv[-1], call_argv[-2], + JS_UNDEFINED, call_argc, + vc(call_argv), 0); + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + for (i = -2; i < call_argc; i++) + JS_FreeValue(ctx, call_argv[i]); + sp -= call_argc + 2; + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +int jit_op_array_from(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int call_argc; + JSValue *call_argv; + JSValue ret_val; + call_argc = get_u16(pc_arg); + call_argv = sp - call_argc; + ret_val = JS_NewArrayFrom(ctx, call_argc, call_argv); + sp -= call_argc; + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +int jit_op_apply(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int magic; + JSValue ret_val; + magic = get_u16(pc_arg); + ret_val = js_function_apply(ctx, sp[-3], 2, vc(&sp[-2]), + magic); + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-3]); + JS_FreeValue(ctx, sp[-2]); + JS_FreeValue(ctx, sp[-1]); + sp -= 3; + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +/* ---- constructor checks ---- */ + +int jit_op_check_ctor_return(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSContext *caller_ctx = aux->caller_ctx; + if (!JS_IsObject(sp[-1])) { + if (!JS_IsUndefined(sp[-1])) { + JS_ThrowTypeError(caller_ctx, + "derived class constructor must return an object or undefined"); + aux->sp = sp; + return -1; + } + sp[0] = JS_TRUE; + } else { + sp[0] = JS_FALSE; + } + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_check_ctor(JSContext *ctx, JitAux *aux) +{ + if (JS_IsUndefined(aux->new_target)) { + JS_ThrowTypeError(ctx, + "class constructors must be invoked with 'new'"); + return -1; + } + return 0; +} + +int jit_op_init_ctor(JSContext *ctx, JitAux 
*aux) +{ + JSValue *sp = aux->sp; + JSValue super, ret; + if (JS_IsUndefined(aux->new_target)) { + JS_ThrowTypeError(ctx, + "class constructors must be invoked with 'new'"); + aux->sp = sp; + return -1; + } + super = JS_GetPrototype(ctx, aux->func_obj); + if (JS_IsException(super)) { + aux->sp = sp; + return -1; + } + ret = JS_CallConstructor2(ctx, super, aux->new_target, + aux->argc, + (JSValueConst *)aux->argv); + JS_FreeValue(ctx, super); + if (JS_IsException(ret)) { + aux->sp = sp; + return -1; + } + *sp++ = ret; + aux->sp = sp; + return 0; +} + +int jit_op_check_brand(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret = JS_CheckBrand(ctx, sp[-2], sp[-1]); + if (ret < 0) { + aux->sp = sp; + return -1; + } + if (!ret) { + JS_ThrowTypeError(ctx, "invalid brand on object"); + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_add_brand(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (JS_AddBrand(ctx, sp[-2], sp[-1]) < 0) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-2]); + JS_FreeValue(ctx, sp[-1]); + sp -= 2; + aux->sp = sp; + return 0; +} + +/* ---- throw ---- */ + +int jit_op_throw(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JS_Throw(ctx, *--sp); + aux->sp = sp; + return -1; +} + +int jit_op_throw_error(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + const uint8_t *pc_arg = pc + 1; + JSAtom atom; + int type; + (void)aux; + atom = get_u32(pc_arg); + type = pc_arg[4]; + if (type == 0 /*JS_THROW_VAR_RO*/) + JS_ThrowTypeErrorReadOnly(ctx, JS_PROP_THROW, atom); + else if (type == 1 /*JS_THROW_VAR_REDECL*/) + JS_ThrowSyntaxErrorVarRedeclaration(ctx, atom); + else if (type == 2 /*JS_THROW_VAR_UNINITIALIZED*/) + JS_ThrowReferenceErrorUninitialized(ctx, atom); + else if (type == 3 /*JS_THROW_ERROR_DELETE_SUPER*/) + JS_ThrowReferenceError(ctx, + "unsupported reference to 'super'"); + else if (type == 4 /*JS_THROW_ERROR_ITERATOR_THROW*/) + JS_ThrowTypeError(ctx, + "iterator does not 
have a throw method"); + else + JS_ThrowInternalError(ctx, + "invalid throw var type %d", type); + return -1; +} + +/* ---- eval ---- */ + +int jit_op_eval(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int call_argc; + JSValue *call_argv; + JSValue ret_val; + JSValue obj; + int scope_idx; + int i; + call_argc = get_u16(pc_arg); + scope_idx = get_u16(pc_arg + 2) - 1; + call_argv = sp - call_argc; + if (js_same_value(ctx, call_argv[-1], ctx->eval_obj)) { + if (call_argc >= 1) + obj = call_argv[0]; + else + obj = JS_UNDEFINED; + ret_val = JS_EvalObject(ctx, JS_UNDEFINED, obj, + JS_EVAL_TYPE_DIRECT, scope_idx); + } else { + ret_val = JS_CallInternal(ctx, call_argv[-1], JS_UNDEFINED, + JS_UNDEFINED, call_argc, + vc(call_argv), 0); + } + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + for (i = -1; i < call_argc; i++) + JS_FreeValue(ctx, call_argv[i]); + sp -= call_argc + 1; + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +int jit_op_apply_eval(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int scope_idx; + uint32_t len; + JSValue *tab; + JSValue obj; + JSValue ret_val; + scope_idx = get_u16(pc_arg) - 1; + tab = build_arg_list(ctx, &len, sp[-1]); + if (!tab) { + aux->sp = sp; + return -1; + } + if (js_same_value(ctx, sp[-2], ctx->eval_obj)) { + if (len >= 1) + obj = tab[0]; + else + obj = JS_UNDEFINED; + ret_val = JS_EvalObject(ctx, JS_UNDEFINED, obj, + JS_EVAL_TYPE_DIRECT, scope_idx); + } else { + ret_val = JS_Call(ctx, sp[-2], JS_UNDEFINED, len, + vc(tab)); + } + free_arg_list(ctx, tab, len); + if (unlikely(JS_IsException(ret_val))) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-2]); + JS_FreeValue(ctx, sp[-1]); + sp -= 2; + *sp++ = ret_val; + aux->sp = sp; + return 0; +} + +/* ---- misc ---- */ + +int jit_op_regexp(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp[-2] = 
js_regexp_constructor_internal(ctx, JS_UNDEFINED, + sp[-2], sp[-1]); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_get_super(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue proto; + proto = JS_GetPrototype(ctx, sp[-1]); + if (JS_IsException(proto)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = proto; + aux->sp = sp; + return 0; +} + +int jit_op_import(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue val; + val = js_dynamic_import(ctx, sp[-2], sp[-1]); + if (JS_IsException(val)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-2]); + JS_FreeValue(ctx, sp[-1]); + sp--; + sp[-1] = val; + aux->sp = sp; + return 0; +} + +/* ---- exception handling ---- */ + +int jit_op_catch(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSFunctionBytecode *b = (JSFunctionBytecode *)aux->b; + int32_t diff = get_u32(pc_arg); + (void)ctx; + *sp++ = JS_NewCatchOffset(ctx, (int)(pc_arg + diff - b->byte_code_buf)); + aux->sp = sp; + return 0; +} + +int jit_op_nip_catch(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue ret_val; + ret_val = *--sp; + while (sp > aux->stack_buf && + JS_VALUE_GET_TAG(sp[-1]) != JS_TAG_CATCH_OFFSET) { + JS_FreeValue(ctx, *--sp); + } + if (unlikely(sp == aux->stack_buf)) { + JS_ThrowInternalError(ctx, "nip_catch"); + JS_FreeValue(ctx, ret_val); + aux->sp = sp; + return -1; + } + sp[-1] = ret_val; + aux->sp = sp; + return 0; +} + +/* ---- iterators ---- */ + +int jit_op_for_in_start(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_for_in_start(ctx, sp)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_for_in_next(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_for_in_next(ctx, sp)) { + aux->sp = sp; + return -1; + } + sp += 2; + aux->sp = sp; + return 0; +} + +int jit_op_for_of_start(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; 
+ if (js_for_of_start(ctx, sp, false)) { + aux->sp = sp; + return -1; + } + sp += 1; + *sp++ = JS_NewCatchOffset(ctx, 0); + aux->sp = sp; + return 0; +} + +int jit_op_for_await_of_start(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_for_of_start(ctx, sp, true)) { + aux->sp = sp; + return -1; + } + sp += 1; + *sp++ = JS_NewCatchOffset(ctx, 0); + aux->sp = sp; + return 0; +} + +int jit_op_for_of_next(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int offset = -3 - pc_arg[0]; + if (js_for_of_next(ctx, sp, offset)) { + aux->sp = sp; + return -1; + } + sp += 2; + aux->sp = sp; + return 0; +} + +int jit_op_iterator_get_value_done(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_iterator_get_value_done(ctx, sp)) { + aux->sp = sp; + return -1; + } + sp += 1; + aux->sp = sp; + return 0; +} + +int jit_op_iterator_check_object(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (unlikely(!JS_IsObject(sp[-1]))) { + JS_ThrowTypeError(ctx, "iterator must return an object"); + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_iterator_close(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp--; /* drop the catch offset */ + JS_FreeValue(ctx, sp[-1]); /* drop the next method */ + sp--; + if (!JS_IsUndefined(sp[-1])) { + if (JS_IteratorClose(ctx, sp[-1], false)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-1]); + } + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_iterator_next(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue ret; + ret = JS_Call(ctx, sp[-3], sp[-4], 1, vc(sp - 1)); + if (JS_IsException(ret)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = ret; + aux->sp = sp; + return 0; +} + +int jit_op_iterator_call(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSValue method, ret; + bool ret_flag; + int 
flags; + flags = pc_arg[0]; + method = JS_GetProperty(ctx, sp[-4], (flags & 1) ? + JS_ATOM_throw : JS_ATOM_return); + if (JS_IsException(method)) { + aux->sp = sp; + return -1; + } + if (JS_IsUndefined(method) || JS_IsNull(method)) { + ret_flag = true; + } else { + if (flags & 2) { + ret = JS_CallFree(ctx, method, sp[-4], 0, NULL); + } else { + ret = JS_CallFree(ctx, method, sp[-4], 1, + vc(sp - 1)); + } + if (JS_IsException(ret)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = ret; + ret_flag = false; + } + sp[0] = js_bool(ret_flag); + sp += 1; + aux->sp = sp; + return 0; +} + +/* ---- logical not ---- */ + +int jit_op_lnot(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int res; + JSValue op1; + op1 = sp[-1]; + if ((uint32_t)JS_VALUE_GET_TAG(op1) <= JS_TAG_UNDEFINED) { + res = JS_VALUE_GET_INT(op1) != 0; + } else { + res = JS_ToBoolFree(ctx, op1); + } + sp[-1] = js_bool(!res); + aux->sp = sp; + return 0; +} + +/* ---- property access: get_field / get_field2 ---- */ + +int jit_op_get_field(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSValue val, obj; + JSAtom atom; + JSObject *fp; + JSProperty *pr; + JSShapeProperty *prs; + atom = get_u32(pc_arg); + obj = sp[-1]; + if (likely(JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT)) { + fp = JS_VALUE_GET_OBJ(obj); + for(;;) { + prs = find_own_property(&pr, fp, atom); + if (prs) { + if (unlikely(prs->flags & JS_PROP_TMASK)) + goto get_field_slow; + val = js_dup(pr->u.value); + break; + } + if (unlikely(fp->is_exotic)) { + obj = JS_MKPTR(JS_TAG_OBJECT, fp); + goto get_field_slow; + } + fp = fp->shape->proto; + if (!fp) { + val = JS_UNDEFINED; + break; + } + } + } else { + get_field_slow: + val = JS_GetPropertyInternal(ctx, obj, atom, sp[-1], false); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = val; + aux->sp = sp; + return 0; +} + +int 
jit_op_get_field2(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSValue val, obj; + JSAtom atom; + JSObject *fp; + JSProperty *pr; + JSShapeProperty *prs; + atom = get_u32(pc_arg); + obj = sp[-1]; + if (likely(JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT)) { + fp = JS_VALUE_GET_OBJ(obj); + for(;;) { + prs = find_own_property(&pr, fp, atom); + if (prs) { + if (unlikely(prs->flags & JS_PROP_TMASK)) + goto get_field2_slow; + val = js_dup(pr->u.value); + break; + } + if (unlikely(fp->is_exotic)) { + obj = JS_MKPTR(JS_TAG_OBJECT, fp); + goto get_field2_slow; + } + fp = fp->shape->proto; + if (!fp) { + val = JS_UNDEFINED; + break; + } + } + } else { + get_field2_slow: + val = JS_GetPropertyInternal(ctx, obj, atom, sp[-1], false); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + } + *sp++ = val; + aux->sp = sp; + return 0; +} + +int jit_op_put_field(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int ret; + JSValue obj; + JSAtom atom; + JSObject *fp; + JSProperty *pr; + JSShapeProperty *prs; + atom = get_u32(pc_arg); + obj = sp[-2]; + if (likely(JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT)) { + fp = JS_VALUE_GET_OBJ(obj); + prs = find_own_property(&pr, fp, atom); + if (!prs) + goto put_field_slow; + if (likely((prs->flags & + (JS_PROP_TMASK | JS_PROP_WRITABLE | + JS_PROP_LENGTH)) == JS_PROP_WRITABLE)) { + set_value(ctx, &pr->u.value, sp[-1]); + } else { + goto put_field_slow; + } + JS_FreeValue(ctx, obj); + sp -= 2; + } else { + put_field_slow: + ret = JS_SetPropertyInternal2(ctx, obj, atom, sp[-1], + obj, + JS_PROP_THROW_STRICT); + JS_FreeValue(ctx, obj); + sp -= 2; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + } + aux->sp = sp; + return 0; +} + +void jit_get_ic_layout(JitICLayout *out) +{ + out->obj_shape_off = (int)offsetof(JSObject, shape); + out->obj_prop_off = (int)offsetof(JSObject, prop); + out->prop_size = 
(int)sizeof(JSProperty); +} + +int jit_op_get_field_ic(JSContext *ctx, JitAux *aux, const uint8_t *pc, PropIC *ic) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSValue val, obj; + JSAtom atom; + JSObject *fp; + JSProperty *pr; + JSShapeProperty *prs; + atom = get_u32(pc_arg); + obj = sp[-1]; + if (likely(JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT)) { + fp = JS_VALUE_GET_OBJ(obj); + /* IC fast path: shape pointer match is sufficient because we pin + the shape (ref_count++) — any in-place mutation clones instead */ + if (likely(ic->cached_shape == (void *)fp->shape)) { + val = js_dup(fp->prop[ic->cached_offset].u.value); + JS_FreeValue(ctx, sp[-1]); + sp[-1] = val; + aux->sp = sp; + return 0; + } + /* IC slow path: full lookup, update cache on own-property hit */ + for(;;) { + prs = find_own_property(&pr, fp, atom); + if (prs) { + if (unlikely(prs->flags & JS_PROP_TMASK)) + goto get_field_ic_slow; + val = js_dup(pr->u.value); + if (fp == JS_VALUE_GET_OBJ(obj)) { + JSShape *new_sh = fp->shape; + if (ic->cached_shape) + js_free_shape(ctx->rt, (JSShape *)ic->cached_shape); + ic->cached_shape = (void *)js_dup_shape(new_sh); + ic->cached_offset = (uint32_t)(pr - fp->prop); + } + break; + } + if (unlikely(fp->is_exotic)) { + obj = JS_MKPTR(JS_TAG_OBJECT, fp); + goto get_field_ic_slow; + } + fp = fp->shape->proto; + if (!fp) { + val = JS_UNDEFINED; + break; + } + } + } else { + get_field_ic_slow: + val = JS_GetPropertyInternal(ctx, obj, atom, sp[-1], false); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = val; + aux->sp = sp; + return 0; +} + +int jit_op_get_field2_ic(JSContext *ctx, JitAux *aux, const uint8_t *pc, PropIC *ic) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSValue val, obj; + JSAtom atom; + JSObject *fp; + JSProperty *pr; + JSShapeProperty *prs; + atom = get_u32(pc_arg); + obj = sp[-1]; + if (likely(JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT)) { + fp = 
JS_VALUE_GET_OBJ(obj); + if (likely(ic->cached_shape == (void *)fp->shape)) { + val = js_dup(fp->prop[ic->cached_offset].u.value); + *sp++ = val; + aux->sp = sp; + return 0; + } + for(;;) { + prs = find_own_property(&pr, fp, atom); + if (prs) { + if (unlikely(prs->flags & JS_PROP_TMASK)) + goto get_field2_ic_slow; + val = js_dup(pr->u.value); + if (fp == JS_VALUE_GET_OBJ(obj)) { + JSShape *new_sh = fp->shape; + if (ic->cached_shape) + js_free_shape(ctx->rt, (JSShape *)ic->cached_shape); + ic->cached_shape = (void *)js_dup_shape(new_sh); + ic->cached_offset = (uint32_t)(pr - fp->prop); + } + break; + } + if (unlikely(fp->is_exotic)) { + obj = JS_MKPTR(JS_TAG_OBJECT, fp); + goto get_field2_ic_slow; + } + fp = fp->shape->proto; + if (!fp) { + val = JS_UNDEFINED; + break; + } + } + } else { + get_field2_ic_slow: + val = JS_GetPropertyInternal(ctx, obj, atom, sp[-1], false); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + } + *sp++ = val; + aux->sp = sp; + return 0; +} + +int jit_op_put_field_ic_hit(JSContext *ctx, JitAux *aux, PropIC *ic) +{ + JSValue *sp = aux->sp; + JSValue obj = sp[-2]; + JSObject *fp = JS_VALUE_GET_OBJ(obj); + JSProperty *pr = &fp->prop[ic->cached_offset]; + set_value(ctx, &pr->u.value, sp[-1]); + JS_FreeValue(ctx, obj); + sp -= 2; + aux->sp = sp; + return 0; +} + +int jit_op_put_field_ic(JSContext *ctx, JitAux *aux, const uint8_t *pc, PropIC *ic) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int ret; + JSValue obj; + JSAtom atom; + JSObject *fp; + JSProperty *pr; + JSShapeProperty *prs; + atom = get_u32(pc_arg); + obj = sp[-2]; + if (likely(JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT)) { + fp = JS_VALUE_GET_OBJ(obj); + if (likely(ic->cached_shape == (void *)fp->shape)) { + pr = &fp->prop[ic->cached_offset]; + set_value(ctx, &pr->u.value, sp[-1]); + JS_FreeValue(ctx, obj); + sp -= 2; + aux->sp = sp; + return 0; + } + prs = find_own_property(&pr, fp, atom); + if (!prs) + goto put_field_ic_slow; + if 
(likely((prs->flags & + (JS_PROP_TMASK | JS_PROP_WRITABLE | + JS_PROP_LENGTH)) == JS_PROP_WRITABLE)) { + set_value(ctx, &pr->u.value, sp[-1]); + { + JSShape *new_sh = fp->shape; + if (ic->cached_shape) + js_free_shape(ctx->rt, (JSShape *)ic->cached_shape); + ic->cached_shape = (void *)js_dup_shape(new_sh); + ic->cached_offset = (uint32_t)(pr - fp->prop); + } + } else { + goto put_field_ic_slow; + } + JS_FreeValue(ctx, obj); + sp -= 2; + } else { + put_field_ic_slow: + ret = JS_SetPropertyInternal2(ctx, obj, atom, sp[-1], + obj, + JS_PROP_THROW_STRICT); + JS_FreeValue(ctx, obj); + sp -= 2; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + } + aux->sp = sp; + return 0; +} + +int jit_op_get_length(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue val, obj; + JSAtom atom; + JSObject *fp; + JSProperty *pr; + JSShapeProperty *prs; + atom = JS_ATOM_length; + obj = sp[-1]; + if (likely(JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT)) { + fp = JS_VALUE_GET_OBJ(obj); + for(;;) { + prs = find_own_property(&pr, fp, atom); + if (prs) { + if (unlikely(prs->flags & JS_PROP_TMASK)) + goto get_length_slow; + val = js_dup(pr->u.value); + break; + } + if (unlikely(fp->is_exotic)) { + obj = JS_MKPTR(JS_TAG_OBJECT, fp); + goto get_length_slow; + } + fp = fp->shape->proto; + if (!fp) { + val = JS_UNDEFINED; + break; + } + } + } else { + get_length_slow: + val = JS_GetPropertyInternal(ctx, obj, atom, sp[-1], false); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = val; + aux->sp = sp; + return 0; +} + +/* ---- private fields ---- */ + +int jit_op_private_symbol(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSAtom atom; + JSValue val; + atom = get_u32(pc_arg); + val = JS_NewSymbolFromAtom(ctx, atom, JS_ATOM_TYPE_PRIVATE); + if (JS_IsException(val)) { + aux->sp = sp; + return -1; + } + *sp++ = val; + aux->sp = sp; + return 0; +} + +int 
jit_op_get_private_field(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue val; + val = JS_GetPrivateField(ctx, sp[-2], sp[-1]); + JS_FreeValue(ctx, sp[-1]); + JS_FreeValue(ctx, sp[-2]); + sp[-2] = val; + sp--; + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_put_private_field(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret; + ret = JS_SetPrivateField(ctx, sp[-3], sp[-1], sp[-2]); + JS_FreeValue(ctx, sp[-3]); + JS_FreeValue(ctx, sp[-1]); + sp -= 3; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_define_private_field(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret; + ret = JS_DefinePrivateField(ctx, sp[-3], sp[-2], sp[-1]); + JS_FreeValue(ctx, sp[-2]); + sp -= 2; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +/* ---- array element access ---- */ + +int jit_op_get_array_el(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue val; + val = JS_GetPropertyValue(ctx, sp[-2], sp[-1]); + JS_FreeValue(ctx, sp[-2]); + sp[-2] = val; + sp--; + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_get_array_el2(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue val; + val = JS_GetPropertyValue(ctx, sp[-2], sp[-1]); + sp[-1] = val; + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_get_ref_value(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue val; + if (unlikely(JS_IsUndefined(sp[-2]))) { + JSAtom atom = JS_ValueToAtom(ctx, sp[-1]); + if (atom != JS_ATOM_NULL) { + JS_ThrowReferenceErrorNotDefined(ctx, atom); + JS_FreeAtom(ctx, atom); + } + aux->sp = sp; + return -1; + } + val = JS_GetPropertyValue(ctx, sp[-2], js_dup(sp[-1])); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + 
return -1; + } + sp[0] = val; + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_get_super_value(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue val; + JSAtom atom; + atom = JS_ValueToAtom(ctx, sp[-1]); + if (unlikely(atom == JS_ATOM_NULL)) { + aux->sp = sp; + return -1; + } + val = JS_GetPropertyInternal(ctx, sp[-2], atom, sp[-3], false); + JS_FreeAtom(ctx, atom); + if (unlikely(JS_IsException(val))) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-1]); + JS_FreeValue(ctx, sp[-2]); + JS_FreeValue(ctx, sp[-3]); + sp[-3] = val; + sp -= 2; + aux->sp = sp; + return 0; +} + +int jit_op_put_array_el(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret; + JSValue val; + uint32_t idx; + JSObject *ap; + val = sp[-1]; + if (likely(JS_VALUE_GET_TAG(sp[-2]) == JS_TAG_INT)) { + idx = JS_VALUE_GET_INT(sp[-2]); + if (likely(JS_VALUE_GET_TAG(sp[-3]) == JS_TAG_OBJECT)) { + ap = JS_VALUE_GET_OBJ(sp[-3]); + if (likely(ap->class_id == JS_CLASS_ARRAY && + idx < (uint32_t)ap->u.array.count)) { + set_value(ctx, &ap->u.array.u.values[idx], val); + JS_FreeValue(ctx, sp[-3]); + sp -= 3; + aux->sp = sp; + return 0; + } + if (likely(ap->class_id == JS_CLASS_ARRAY && + idx == (uint32_t)ap->u.array.count && + ap->fast_array && + ap->extensible && + ap->shape->proto == JS_VALUE_GET_OBJ(ctx->class_proto[JS_CLASS_ARRAY]) && + ctx->std_array_prototype)) { + uint32_t array_len; + if (likely(JS_VALUE_GET_TAG(ap->prop[0].u.value) == JS_TAG_INT)) { + uint32_t new_len = idx + 1; + array_len = JS_VALUE_GET_INT(ap->prop[0].u.value); + if (likely(new_len <= ap->u.array.u1.size)) { + ap->u.array.u.values[idx] = val; + ap->u.array.count = new_len; + if (new_len > array_len) + ap->prop[0].u.value = js_int32(new_len); + JS_FreeValue(ctx, sp[-3]); + sp -= 3; + aux->sp = sp; + return 0; + } + } + } + } + } + ret = JS_SetPropertyValue(ctx, sp[-3], sp[-2], sp[-1], + JS_PROP_THROW_STRICT); + JS_FreeValue(ctx, sp[-3]); + sp -= 3; + if (unlikely(ret < 0)) { + aux->sp = 
sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_put_ref_value(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret, flags; + flags = JS_PROP_THROW_STRICT; + if (unlikely(JS_IsUndefined(sp[-3]))) { + if (is_strict_mode(ctx)) { + JSAtom atom = JS_ValueToAtom(ctx, sp[-2]); + if (atom != JS_ATOM_NULL) { + JS_ThrowReferenceErrorNotDefined(ctx, atom); + JS_FreeAtom(ctx, atom); + } + aux->sp = sp; + return -1; + } else { + sp[-3] = js_dup(ctx->global_obj); + } + } else { + if (is_strict_mode(ctx)) + flags |= JS_PROP_NO_ADD; + } + ret = JS_SetPropertyValue(ctx, sp[-3], sp[-2], sp[-1], flags); + JS_FreeValue(ctx, sp[-3]); + sp -= 3; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_put_super_value(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret; + JSAtom atom; + if (JS_VALUE_GET_TAG(sp[-3]) != JS_TAG_OBJECT) { + JS_ThrowTypeErrorNotAnObject(ctx); + aux->sp = sp; + return -1; + } + atom = JS_ValueToAtom(ctx, sp[-2]); + if (unlikely(atom == JS_ATOM_NULL)) { + aux->sp = sp; + return -1; + } + ret = JS_SetPropertyInternal2(ctx, sp[-3], atom, + sp[-1], sp[-4], + JS_PROP_THROW_STRICT); + JS_FreeAtom(ctx, atom); + JS_FreeValue(ctx, sp[-4]); + JS_FreeValue(ctx, sp[-3]); + JS_FreeValue(ctx, sp[-2]); + sp -= 4; + if (ret < 0) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_define_array_el(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret; + ret = JS_DefinePropertyValueValue(ctx, sp[-3], + js_dup(sp[-2]), sp[-1], + JS_PROP_C_W_E | JS_PROP_THROW); + sp -= 1; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_append(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_append_enumerate(ctx, sp)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, *--sp); + aux->sp = sp; + return 0; +} + +int jit_op_copy_data_properties(JSContext *ctx, JitAux *aux, const uint8_t *pc) 
+{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int mask; + mask = pc_arg[0]; + if (JS_CopyDataProperties(ctx, sp[-1 - (mask & 3)], + sp[-1 - ((mask >> 2) & 7)], + sp[-1 - ((mask >> 5) & 7)], 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +/* ---- define field / name / proto / home ---- */ + +int jit_op_define_field(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int ret; + JSAtom atom; + atom = get_u32(pc_arg); + ret = JS_DefinePropertyValue(ctx, sp[-2], atom, sp[-1], + JS_PROP_C_W_E | JS_PROP_THROW); + sp--; + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_set_name(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + int ret; + JSAtom atom; + atom = get_u32(pc_arg); + ret = JS_DefineObjectName(ctx, sp[-1], atom, + JS_PROP_CONFIGURABLE); + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_set_name_computed(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + int ret; + ret = JS_DefineObjectNameComputed(ctx, sp[-1], sp[-2], + JS_PROP_CONFIGURABLE); + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + aux->sp = sp; + return 0; +} + +int jit_op_set_proto(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue proto; + proto = sp[-1]; + if (JS_IsObject(proto) || JS_IsNull(proto)) { + if (JS_SetPrototypeInternal(ctx, sp[-2], proto, true) < 0) { + aux->sp = sp; + return -1; + } + } + JS_FreeValue(ctx, proto); + sp--; + aux->sp = sp; + return 0; +} + +int jit_op_set_home_object(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + js_method_set_home_object(ctx, sp[-1], sp[-2]); + aux->sp = sp; + return 0; +} + +/* ---- define method ---- */ + +int jit_op_define_method(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 
1; + OPCodeEnum opcode = (OPCodeEnum)*pc; + JSValue getter, setter, value; + JSValue obj; + JSAtom atom; + int flags, ret, op_flags; + bool is_computed; + is_computed = (opcode == OP_define_method_computed); + if (is_computed) { + atom = JS_ValueToAtom(ctx, sp[-2]); + if (unlikely(atom == JS_ATOM_NULL)) + goto jit_exception; + op_flags = pc_arg[0]; + } else { + atom = get_u32(pc_arg); + op_flags = pc_arg[4]; + } + obj = sp[-2 - is_computed]; + flags = JS_PROP_HAS_CONFIGURABLE | JS_PROP_CONFIGURABLE | + JS_PROP_HAS_ENUMERABLE | JS_PROP_THROW; + if (op_flags & 4 /*OP_DEFINE_METHOD_ENUMERABLE*/) + flags |= JS_PROP_ENUMERABLE; + op_flags &= 3; + value = JS_UNDEFINED; + getter = JS_UNDEFINED; + setter = JS_UNDEFINED; + if (op_flags == 0 /*OP_DEFINE_METHOD_METHOD*/) { + value = sp[-1]; + flags |= JS_PROP_HAS_VALUE | JS_PROP_HAS_WRITABLE | + JS_PROP_WRITABLE; + } else if (op_flags == 1 /*OP_DEFINE_METHOD_GETTER*/) { + getter = sp[-1]; + flags |= JS_PROP_HAS_GET; + } else { + setter = sp[-1]; + flags |= JS_PROP_HAS_SET; + } + ret = js_method_set_properties(ctx, sp[-1], atom, flags, obj); + if (ret >= 0) { + ret = JS_DefineProperty(ctx, obj, atom, value, + getter, setter, flags); + } + JS_FreeValue(ctx, sp[-1]); + if (is_computed) { + JS_FreeAtom(ctx, atom); + JS_FreeValue(ctx, sp[-2]); + } + sp -= 1 + is_computed; + if (unlikely(ret < 0)) + goto jit_exception; + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +/* ---- define class ---- */ + +int jit_op_define_class(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + OPCodeEnum opcode = (OPCodeEnum)*pc; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + JSVarRef **var_refs = (JSVarRef **)aux->var_refs; + int class_flags; + JSAtom atom; + atom = get_u32(pc_arg); + class_flags = pc_arg[4]; + if (js_op_define_class(ctx, sp, atom, class_flags, + var_refs, sf, + (opcode == OP_define_class_computed)) < 0) { + aux->sp = sp; + return -1; + } + 
aux->sp = sp; + return 0; +} + +/* ---- arithmetic ---- */ + +int jit_op_add(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int64_t r; + r = (int64_t)JS_VALUE_GET_INT(op1) + + JS_VALUE_GET_INT(op2); + if (unlikely(r < INT32_MIN || r > INT32_MAX)) + sp[-2] = js_float64(r); + else + sp[-2] = js_int32(r); + sp--; + } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { + sp[-2] = js_float64(JS_VALUE_GET_FLOAT64(op1) + + JS_VALUE_GET_FLOAT64(op2)); + sp--; + } else { + if (js_add_slow(ctx, sp)) + goto jit_exception; + sp--; + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_add_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + JSValue *var_buf = aux->var_buf; + JSValue *pv; + int idx; + idx = pc_arg[0]; + pv = &var_buf[idx]; + if (likely(JS_VALUE_IS_BOTH_INT(*pv, sp[-1]))) { + int64_t r; + r = (int64_t)JS_VALUE_GET_INT(*pv) + + JS_VALUE_GET_INT(sp[-1]); + if (unlikely((int)r != r)) + *pv = __JS_NewFloat64((double)r); + else + *pv = js_int32(r); + sp--; + } else if (JS_VALUE_GET_TAG(*pv) == JS_TAG_STRING) { + JSValue op1; + op1 = sp[-1]; + sp--; + op1 = JS_ToPrimitiveFree(ctx, op1, HINT_NONE); + if (JS_IsException(op1)) + goto jit_exception; + op1 = JS_ConcatString(ctx, js_dup(*pv), op1); + if (JS_IsException(op1)) + goto jit_exception; + set_value(ctx, pv, op1); + } else { + JSValue ops[2]; + ops[0] = js_dup(*pv); + ops[1] = sp[-1]; + sp--; + if (js_add_slow(ctx, ops + 2)) + goto jit_exception; + set_value(ctx, pv, ops[0]); + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_sub(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int64_t r; + r = (int64_t)JS_VALUE_GET_INT(op1) - + JS_VALUE_GET_INT(op2); + if 
(unlikely((int)r != r)) + sp[-2] = __JS_NewFloat64((double)r); + else + sp[-2] = js_int32(r); + sp--; + } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { + sp[-2] = js_float64(JS_VALUE_GET_FLOAT64(op1) - + JS_VALUE_GET_FLOAT64(op2)); + sp--; + } else { + if (js_binary_arith_slow(ctx, sp, OP_sub)) + goto jit_exception; + sp--; + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_mul(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + double d; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int32_t v1, v2; + int64_t r; + v1 = JS_VALUE_GET_INT(op1); + v2 = JS_VALUE_GET_INT(op2); + r = (int64_t)v1 * v2; + if (unlikely((int)r != r)) { + d = (double)r; + sp[-2] = js_float64(d); + sp--; + } else if (unlikely(r == 0 && (v1 | v2) < 0)) { + d = -0.0; + sp[-2] = js_float64(d); + sp--; + } else { + sp[-2] = js_int32(r); + sp--; + } + } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { + d = JS_VALUE_GET_FLOAT64(op1) * + JS_VALUE_GET_FLOAT64(op2); + sp[-2] = js_float64(d); + sp--; + } else { + if (js_binary_arith_slow(ctx, sp, OP_mul)) + goto jit_exception; + sp--; + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_div(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int v1, v2; + v1 = JS_VALUE_GET_INT(op1); + v2 = JS_VALUE_GET_INT(op2); + sp[-2] = js_number((double)v1 / (double)v2); + sp--; + } else { + if (js_binary_arith_slow(ctx, sp, OP_div)) + goto jit_exception; + sp--; + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_mod(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int v1, v2, r; + v1 = JS_VALUE_GET_INT(op1); + v2 = JS_VALUE_GET_INT(op2); + if (unlikely(v1 < 0 || v2 <= 
0)) { + if (js_binary_arith_slow(ctx, sp, OP_mod)) + goto jit_exception; + sp--; + } else { + r = v1 % v2; + sp[-2] = js_int32(r); + sp--; + } + } else { + if (js_binary_arith_slow(ctx, sp, OP_mod)) + goto jit_exception; + sp--; + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +int jit_op_pow(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_binary_arith_slow(ctx, sp, OP_pow)) { + aux->sp = sp; + return -1; + } + sp--; + aux->sp = sp; + return 0; +} + +/* ---- unary arithmetic ---- */ + +int jit_op_plus(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + uint32_t tag; + op1 = sp[-1]; + tag = JS_VALUE_GET_TAG(op1); + if (tag == JS_TAG_INT || JS_TAG_IS_FLOAT64(tag)) { + /* nothing */ + } else { + if (js_unary_arith_slow(ctx, sp, OP_plus)) { + aux->sp = sp; + return -1; + } + } + aux->sp = sp; + return 0; +} + +int jit_op_neg(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + uint32_t tag; + int val; + double d; + op1 = sp[-1]; + tag = JS_VALUE_GET_TAG(op1); + if (tag == JS_TAG_INT) { + val = JS_VALUE_GET_INT(op1); + if (unlikely(val == 0)) { + d = -0.0; + sp[-1] = js_float64(d); + } else if (unlikely(val == INT32_MIN)) { + d = -(double)val; + sp[-1] = js_float64(d); + } else { + sp[-1] = js_int32(-val); + } + } else if (JS_TAG_IS_FLOAT64(tag)) { + d = -JS_VALUE_GET_FLOAT64(op1); + sp[-1] = js_float64(d); + } else { + if (js_unary_arith_slow(ctx, sp, OP_neg)) { + aux->sp = sp; + return -1; + } + } + aux->sp = sp; + return 0; +} + +int jit_op_inc(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + int val; + op1 = sp[-1]; + if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { + val = JS_VALUE_GET_INT(op1); + if (unlikely(val == INT32_MAX)) { + if (js_unary_arith_slow(ctx, sp, OP_inc)) { + aux->sp = sp; + return -1; + } + } else { + sp[-1] = js_int32(val + 1); + } + } else { + if (js_unary_arith_slow(ctx, sp, OP_inc)) { + aux->sp = sp; + return -1; + } + } + aux->sp 
= sp; + return 0; +} + +int jit_op_dec(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + int val; + op1 = sp[-1]; + if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { + val = JS_VALUE_GET_INT(op1); + if (unlikely(val == INT32_MIN)) { + if (js_unary_arith_slow(ctx, sp, OP_dec)) { + aux->sp = sp; + return -1; + } + } else { + sp[-1] = js_int32(val - 1); + } + } else { + if (js_unary_arith_slow(ctx, sp, OP_dec)) { + aux->sp = sp; + return -1; + } + } + aux->sp = sp; + return 0; +} + +int jit_op_post_inc(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + int val; + op1 = sp[-1]; + if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { + val = JS_VALUE_GET_INT(op1); + if (unlikely(val == INT32_MAX)) { + if (js_post_inc_slow(ctx, sp, OP_post_inc)) { + aux->sp = sp; + return -1; + } + } else { + sp[0] = js_int32(val + 1); + } + } else { + if (js_post_inc_slow(ctx, sp, OP_post_inc)) { + aux->sp = sp; + return -1; + } + } + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_post_dec(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + int val; + op1 = sp[-1]; + if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { + val = JS_VALUE_GET_INT(op1); + if (unlikely(val == INT32_MIN)) { + if (js_post_inc_slow(ctx, sp, OP_post_dec)) { + aux->sp = sp; + return -1; + } + } else { + sp[0] = js_int32(val - 1); + } + } else { + if (js_post_inc_slow(ctx, sp, OP_post_dec)) { + aux->sp = sp; + return -1; + } + } + sp++; + aux->sp = sp; + return 0; +} + +int jit_op_inc_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + const uint8_t *pc_arg = pc + 1; + JSValue *var_buf = aux->var_buf; + JSValue op1; + int val; + int idx; + idx = pc_arg[0]; + op1 = var_buf[idx]; + if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { + val = JS_VALUE_GET_INT(op1); + if (unlikely(val == INT32_MAX)) { + op1 = js_dup(op1); + if (js_unary_arith_slow(ctx, &op1 + 1, OP_inc)) + return -1; + set_value(ctx, &var_buf[idx], op1); + } else { + var_buf[idx] = js_int32(val + 1); + } + } 
else { + op1 = js_dup(op1); + if (js_unary_arith_slow(ctx, &op1 + 1, OP_inc)) + return -1; + set_value(ctx, &var_buf[idx], op1); + } + return 0; +} + +int jit_op_dec_loc(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + const uint8_t *pc_arg = pc + 1; + JSValue *var_buf = aux->var_buf; + JSValue op1; + int val; + int idx; + idx = pc_arg[0]; + op1 = var_buf[idx]; + if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { + val = JS_VALUE_GET_INT(op1); + if (unlikely(val == INT32_MIN)) { + op1 = js_dup(op1); + if (js_unary_arith_slow(ctx, &op1 + 1, OP_dec)) + return -1; + set_value(ctx, &var_buf[idx], op1); + } else { + var_buf[idx] = js_int32(val - 1); + } + } else { + op1 = js_dup(op1); + if (js_unary_arith_slow(ctx, &op1 + 1, OP_dec)) + return -1; + set_value(ctx, &var_buf[idx], op1); + } + return 0; +} + +int jit_op_not(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + op1 = sp[-1]; + if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { + sp[-1] = js_int32(~JS_VALUE_GET_INT(op1)); + } else { + if (js_not_slow(ctx, sp)) { + aux->sp = sp; + return -1; + } + } + aux->sp = sp; + return 0; +} + +/* ---- bitwise shifts / logic ---- */ + +int jit_op_shl(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + uint32_t v1, v2; + v1 = JS_VALUE_GET_INT(op1); + v2 = JS_VALUE_GET_INT(op2) & 0x1f; + sp[-2] = js_int32(v1 << v2); + sp--; + } else { + if (js_binary_logic_slow(ctx, sp, OP_shl)) { + aux->sp = sp; + return -1; + } + sp--; + } + aux->sp = sp; + return 0; +} + +int jit_op_shr(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + uint32_t v2; + v2 = JS_VALUE_GET_INT(op2); + v2 &= 0x1f; + sp[-2] = js_uint32((uint32_t)JS_VALUE_GET_INT(op1) >> v2); + sp--; + } else { + if (js_shr_slow(ctx, sp)) { + aux->sp = sp; + return -1; + } + sp--; + } + aux->sp = sp; + return 
0; } -#define JS_DEFINE_CLASS_HAS_HERITAGE (1 << 0) - -static int js_op_define_class(JSContext *ctx, JSValue *sp, - JSAtom class_name, int class_flags, - JSVarRef **cur_var_refs, - JSStackFrame *sf, bool is_computed_name) +int jit_op_binary_logic(JSContext *ctx, JitAux *aux, int opcode) { - JSValue bfunc, parent_class, proto = JS_UNDEFINED; - JSValue ctor = JS_UNDEFINED, parent_proto = JS_UNDEFINED; - JSFunctionBytecode *b; - - parent_class = sp[-2]; - bfunc = sp[-1]; - - if (class_flags & JS_DEFINE_CLASS_HAS_HERITAGE) { - if (JS_IsNull(parent_class)) { - parent_proto = JS_NULL; - parent_class = js_dup(ctx->function_proto); - } else { - if (!JS_IsConstructor(ctx, parent_class)) { - JS_ThrowTypeError(ctx, "parent class must be constructor"); - goto fail; - } - parent_proto = JS_GetProperty(ctx, parent_class, JS_ATOM_prototype); - if (JS_IsException(parent_proto)) - goto fail; - if (!JS_IsNull(parent_proto) && !JS_IsObject(parent_proto)) { - JS_ThrowTypeError(ctx, "parent prototype must be an object or null"); - goto fail; + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int32_t v1 = JS_VALUE_GET_INT(op1); + int32_t v2 = JS_VALUE_GET_INT(op2); + int32_t r; + switch (opcode) { + case OP_sar: + { + uint32_t shift = v2; + if (unlikely(shift > 0x1f)) + shift &= 0x1f; + r = (int)v1 >> shift; } + break; + case OP_and: r = v1 & v2; break; + case OP_or: r = v1 | v2; break; + case OP_xor: r = v1 ^ v2; break; + default: abort(); r = 0; } + sp[-2] = js_int32(r); + sp--; } else { - /* parent_class is JS_UNDEFINED in this case */ - parent_proto = js_dup(ctx->class_proto[JS_CLASS_OBJECT]); - parent_class = js_dup(ctx->function_proto); + if (js_binary_logic_slow(ctx, sp, opcode)) { + aux->sp = sp; + return -1; + } + sp--; } - proto = JS_NewObjectProto(ctx, parent_proto); - if (JS_IsException(proto)) - goto fail; + aux->sp = sp; + return 0; +} - b = JS_VALUE_GET_PTR(bfunc); - assert(b->func_kind == 
JS_FUNC_NORMAL); - ctor = JS_NewObjectProtoClass(ctx, parent_class, - JS_CLASS_BYTECODE_FUNCTION); - if (JS_IsException(ctor)) - goto fail; - ctor = js_closure2(ctx, ctor, b, cur_var_refs, sf); - bfunc = JS_UNDEFINED; - if (JS_IsException(ctor)) - goto fail; - js_method_set_home_object(ctx, ctor, proto); - JS_SetConstructorBit(ctx, ctor, true); +/* ---- comparison operators ---- */ - JS_DefinePropertyValue(ctx, ctor, JS_ATOM_length, - js_int32(b->defined_arg_count), - JS_PROP_CONFIGURABLE); +int jit_op_relational(JSContext *ctx, JitAux *aux, int opcode) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int v1 = JS_VALUE_GET_INT(op1); + int v2 = JS_VALUE_GET_INT(op2); + int res; + switch (opcode) { + case OP_lt: res = v1 < v2; break; + case OP_lte: res = v1 <= v2; break; + case OP_gt: res = v1 > v2; break; + case OP_gte: res = v1 >= v2; break; + default: abort(); res = 0; + } + sp[-2] = js_bool(res); + sp--; + } else { + if (js_relational_slow(ctx, sp, opcode)) { + aux->sp = sp; + return -1; + } + sp--; + } + aux->sp = sp; + return 0; +} - if (is_computed_name) { - if (JS_DefineObjectNameComputed(ctx, ctor, sp[-3], - JS_PROP_CONFIGURABLE) < 0) - goto fail; +int jit_op_eq(JSContext *ctx, JitAux *aux, int opcode) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int res; + if (opcode == OP_eq) + res = JS_VALUE_GET_INT(op1) == JS_VALUE_GET_INT(op2); + else + res = JS_VALUE_GET_INT(op1) != JS_VALUE_GET_INT(op2); + sp[-2] = js_bool(res); + sp--; } else { - if (JS_DefineObjectName(ctx, ctor, class_name, JS_PROP_CONFIGURABLE) < 0) - goto fail; + if (js_eq_slow(ctx, sp, opcode - OP_eq)) { + aux->sp = sp; + return -1; + } + sp--; } + aux->sp = sp; + return 0; +} - /* the constructor property must be first. 
It can be overriden by - computed property names */ - if (JS_DefinePropertyValue(ctx, proto, JS_ATOM_constructor, - js_dup(ctor), - JS_PROP_CONFIGURABLE | - JS_PROP_WRITABLE | JS_PROP_THROW) < 0) - goto fail; - /* set the prototype property */ - if (JS_DefinePropertyValue(ctx, ctor, JS_ATOM_prototype, - js_dup(proto), JS_PROP_THROW) < 0) - goto fail; +int jit_op_strict_eq(JSContext *ctx, JitAux *aux, int opcode) +{ + JSValue *sp = aux->sp; + JSValue op1, op2; + op1 = sp[-2]; + op2 = sp[-1]; + if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { + int res; + if (opcode == OP_strict_eq) + res = JS_VALUE_GET_INT(op1) == JS_VALUE_GET_INT(op2); + else + res = JS_VALUE_GET_INT(op1) != JS_VALUE_GET_INT(op2); + sp[-2] = js_bool(res); + sp--; + } else { + if (js_strict_eq_slow(ctx, sp, opcode - OP_strict_eq)) { + aux->sp = sp; + return -1; + } + sp--; + } + aux->sp = sp; + return 0; +} - JS_FreeValue(ctx, parent_proto); - JS_FreeValue(ctx, parent_class); +/* ---- operators ---- */ - sp[-2] = ctor; - sp[-1] = proto; +int jit_op_in(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_operator_in(ctx, sp)) { + aux->sp = sp; + return -1; + } + sp--; + aux->sp = sp; return 0; - fail: - JS_FreeValue(ctx, parent_class); - JS_FreeValue(ctx, parent_proto); - JS_FreeValue(ctx, bfunc); - JS_FreeValue(ctx, proto); - JS_FreeValue(ctx, ctor); - sp[-2] = JS_UNDEFINED; - sp[-1] = JS_UNDEFINED; - return -1; } -static void close_var_ref(JSRuntime *rt, JSVarRef *var_ref) +int jit_op_private_in(JSContext *ctx, JitAux *aux) { - var_ref->value = js_dup(*var_ref->pvalue); - var_ref->pvalue = &var_ref->value; - /* the reference is no longer to a local variable */ - var_ref->is_detached = true; - add_gc_object(rt, &var_ref->header, JS_GC_OBJ_TYPE_VAR_REF); + JSValue *sp = aux->sp; + if (js_operator_private_in(ctx, sp)) { + aux->sp = sp; + return -1; + } + sp--; + aux->sp = sp; + return 0; } -static void close_var_refs(JSRuntime *rt, JSStackFrame *sf) +int jit_op_instanceof(JSContext *ctx, 
JitAux *aux) { - JSVarRef *var_ref; - int i; - - for (i = 0; i < sf->var_ref_count; i++) { - var_ref = sf->var_refs[i]; - if (var_ref) - close_var_ref(rt, var_ref); + JSValue *sp = aux->sp; + if (js_operator_instanceof(ctx, sp)) { + aux->sp = sp; + return -1; } + sp--; + aux->sp = sp; + return 0; } -static void close_lexical_var(JSContext *ctx, JSFunctionBytecode *b, - JSStackFrame *sf, int var_idx) +int jit_op_typeof(JSContext *ctx, JitAux *aux) { - JSVarRef *var_ref; - int var_ref_idx; + JSValue *sp = aux->sp; + JSValue op1; + JSAtom atom; + op1 = sp[-1]; + atom = js_operator_typeof(ctx, op1); + JS_FreeValue(ctx, op1); + sp[-1] = JS_AtomToString(ctx, atom); + aux->sp = sp; + return 0; +} - var_ref_idx = b->vardefs[b->arg_count + var_idx].var_ref_idx; - var_ref = sf->var_refs[var_ref_idx]; - if (var_ref) { - close_var_ref(ctx->rt, var_ref); - sf->var_refs[var_ref_idx] = NULL; +int jit_op_delete(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_operator_delete(ctx, sp)) { + aux->sp = sp; + return -1; } + sp--; + aux->sp = sp; + return 0; } -#define JS_CALL_FLAG_COPY_ARGV (1 << 1) -#define JS_CALL_FLAG_GENERATOR (1 << 2) - -static JSValue js_call_c_function(JSContext *ctx, JSValueConst func_obj, - JSValueConst this_obj, - int argc, JSValueConst *argv, int flags) +int jit_op_delete_var(JSContext *ctx, JitAux *aux, const uint8_t *pc) { - JSRuntime *rt = ctx->rt; - JSCFunctionType func; - JSObject *p; - JSStackFrame sf_s, *sf = &sf_s, *prev_sf; - JSValue ret_val; - JSValueConst *arg_buf; - int arg_count, i; - JSCFunctionEnum cproto; - - p = JS_VALUE_GET_OBJ(func_obj); - cproto = p->u.cfunc.cproto; - arg_count = p->u.cfunc.length; - - /* better to always check stack overflow */ - if (js_check_stack_overflow(rt, sizeof(arg_buf[0]) * arg_count)) - return JS_ThrowStackOverflow(ctx); - - prev_sf = rt->current_stack_frame; - sf->prev_frame = prev_sf; - rt->current_stack_frame = sf; - ctx = p->u.cfunc.realm; /* change the current realm */ + JSValue *sp = 
aux->sp; + const uint8_t *pc_arg = pc + 1; + JSAtom atom; + int ret; + atom = get_u32(pc_arg); + ret = JS_DeleteGlobalVar(ctx, atom); + if (unlikely(ret < 0)) { + aux->sp = sp; + return -1; + } + *sp++ = js_bool(ret); + aux->sp = sp; + return 0; +} - sf->is_strict_mode = false; - sf->cur_func = unsafe_unconst(func_obj); - sf->arg_count = argc; - arg_buf = argv; +/* ---- type conversion ---- */ - if (unlikely(argc < arg_count)) { - /* ensure that at least argc_count arguments are readable */ - arg_buf = alloca(sizeof(arg_buf[0]) * arg_count); - for(i = 0; i < argc; i++) - arg_buf[i] = argv[i]; - for(i = argc; i < arg_count; i++) - arg_buf[i] = JS_UNDEFINED; - sf->arg_count = arg_count; +int jit_op_to_object(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue ret_val; + if (JS_VALUE_GET_TAG(sp[-1]) != JS_TAG_OBJECT) { + ret_val = JS_ToObject(ctx, sp[-1]); + if (JS_IsException(ret_val)) { + aux->sp = sp; + return -1; + } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = ret_val; } - sf->arg_buf = (JSValue *)arg_buf; + aux->sp = sp; + return 0; +} - func = p->u.cfunc.c_function; - switch(cproto) { - case JS_CFUNC_constructor: - case JS_CFUNC_constructor_or_func: - if (!(flags & JS_CALL_FLAG_CONSTRUCTOR)) { - if (cproto == JS_CFUNC_constructor) { - not_a_constructor: - ret_val = JS_ThrowTypeError(ctx, "must be called with new"); - break; - } else { - this_obj = JS_UNDEFINED; - } - } - /* here this_obj is new_target */ - /* fall thru */ - case JS_CFUNC_generic: - ret_val = func.generic(ctx, this_obj, argc, arg_buf); - break; - case JS_CFUNC_constructor_magic: - case JS_CFUNC_constructor_or_func_magic: - if (!(flags & JS_CALL_FLAG_CONSTRUCTOR)) { - if (cproto == JS_CFUNC_constructor_magic) { - goto not_a_constructor; - } else { - this_obj = JS_UNDEFINED; - } - } - /* fall thru */ - case JS_CFUNC_generic_magic: - ret_val = func.generic_magic(ctx, this_obj, argc, arg_buf, - p->u.cfunc.magic); - break; - case JS_CFUNC_getter: - ret_val = func.getter(ctx, this_obj); - 
break; - case JS_CFUNC_setter: - ret_val = func.setter(ctx, this_obj, arg_buf[0]); - break; - case JS_CFUNC_getter_magic: - ret_val = func.getter_magic(ctx, this_obj, p->u.cfunc.magic); - break; - case JS_CFUNC_setter_magic: - ret_val = func.setter_magic(ctx, this_obj, arg_buf[0], p->u.cfunc.magic); +int jit_op_to_propkey(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue ret_val; + switch (JS_VALUE_GET_TAG(sp[-1])) { + case JS_TAG_INT: + case JS_TAG_STRING: + case JS_TAG_SYMBOL: break; - case JS_CFUNC_f_f: - { - double d1; - - if (unlikely(JS_ToFloat64(ctx, &d1, arg_buf[0]))) { - ret_val = JS_EXCEPTION; - break; - } - ret_val = js_number(func.f_f(d1)); + default: + ret_val = JS_ToPropertyKey(ctx, sp[-1]); + if (JS_IsException(ret_val)) { + aux->sp = sp; + return -1; } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = ret_val; break; - case JS_CFUNC_f_f_f: - { - double d1, d2; + } + aux->sp = sp; + return 0; +} - if (unlikely(JS_ToFloat64(ctx, &d1, arg_buf[0]))) { - ret_val = JS_EXCEPTION; - break; - } - if (unlikely(JS_ToFloat64(ctx, &d2, arg_buf[1]))) { - ret_val = JS_EXCEPTION; - break; - } - ret_val = js_number(func.f_f_f(d1, d2)); - } +int jit_op_to_propkey2(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue ret_val; + if (unlikely(JS_IsUndefined(sp[-2]) || JS_IsNull(sp[-2]))) { + JS_ThrowTypeError(ctx, "value has no property"); + aux->sp = sp; + return -1; + } + switch (JS_VALUE_GET_TAG(sp[-1])) { + case JS_TAG_INT: + case JS_TAG_STRING: + case JS_TAG_SYMBOL: break; - case JS_CFUNC_iterator_next: - { - int done; - ret_val = func.iterator_next(ctx, this_obj, argc, arg_buf, - &done, p->u.cfunc.magic); - if (!JS_IsException(ret_val) && done != 2) { - ret_val = js_create_iterator_result(ctx, ret_val, done); - } + default: + ret_val = JS_ToPropertyKey(ctx, sp[-1]); + if (JS_IsException(ret_val)) { + aux->sp = sp; + return -1; } + JS_FreeValue(ctx, sp[-1]); + sp[-1] = ret_val; break; - default: - abort(); } + aux->sp = sp; + return 0; +} - 
rt->current_stack_frame = sf->prev_frame; - return ret_val; +/* ---- type checks ---- */ + +int jit_op_is_undefined_or_null(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (JS_VALUE_GET_TAG(sp[-1]) == JS_TAG_UNDEFINED || + JS_VALUE_GET_TAG(sp[-1]) == JS_TAG_NULL) { + sp[-1] = JS_TRUE; + } else { + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_FALSE; + } + aux->sp = sp; + return 0; } -static JSValue js_call_bound_function(JSContext *ctx, JSValueConst func_obj, - JSValueConst this_obj, - int argc, JSValueConst *argv, int flags) +int jit_op_is_undefined(JSContext *ctx, JitAux *aux) { - JSObject *p; - JSBoundFunction *bf; - JSValueConst *arg_buf, new_target; - int arg_count, i; + JSValue *sp = aux->sp; + if (JS_VALUE_GET_TAG(sp[-1]) == JS_TAG_UNDEFINED) { + sp[-1] = JS_TRUE; + } else { + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_FALSE; + } + aux->sp = sp; + return 0; +} - p = JS_VALUE_GET_OBJ(func_obj); - bf = p->u.bound_function; - arg_count = bf->argc + argc; - if (js_check_stack_overflow(ctx->rt, sizeof(JSValue) * arg_count)) - return JS_ThrowStackOverflow(ctx); - arg_buf = alloca(sizeof(JSValue) * arg_count); - for(i = 0; i < bf->argc; i++) { - arg_buf[i] = bf->argv[i]; +int jit_op_is_null(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (JS_VALUE_GET_TAG(sp[-1]) == JS_TAG_NULL) { + sp[-1] = JS_TRUE; + } else { + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_FALSE; } - for(i = 0; i < argc; i++) { - arg_buf[bf->argc + i] = argv[i]; + aux->sp = sp; + return 0; +} + +int jit_op_typeof_is_undefined(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_operator_typeof(ctx, sp[-1]) == JS_ATOM_undefined) { + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_TRUE; + } else { + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_FALSE; } - if (flags & JS_CALL_FLAG_CONSTRUCTOR) { - new_target = this_obj; - if (js_same_value(ctx, func_obj, new_target)) - new_target = bf->func_obj; - return JS_CallConstructor2(ctx, bf->func_obj, new_target, - arg_count, arg_buf); + 
aux->sp = sp; + return 0; +} + +int jit_op_typeof_is_function(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + if (js_operator_typeof(ctx, sp[-1]) == JS_ATOM_function) { + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_TRUE; } else { - return JS_Call(ctx, bf->func_obj, bf->this_val, - arg_count, arg_buf); + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_FALSE; } + aux->sp = sp; + return 0; } -/* argument of OP_special_object */ -typedef enum { - OP_SPECIAL_OBJECT_ARGUMENTS, - OP_SPECIAL_OBJECT_MAPPED_ARGUMENTS, - OP_SPECIAL_OBJECT_THIS_FUNC, - OP_SPECIAL_OBJECT_NEW_TARGET, - OP_SPECIAL_OBJECT_HOME_OBJECT, - OP_SPECIAL_OBJECT_VAR_OBJECT, - OP_SPECIAL_OBJECT_IMPORT_META, - OP_SPECIAL_OBJECT_NULL_PROTO, -} OPSpecialObjectEnum; +/* ---- with_* opcodes ---- */ +/* Return protocol: 0=fall-through (not found), 1=branch-taken (found), + * -1=exception. */ + +int jit_op_with(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + JSValue *sp = aux->sp; + const uint8_t *pc_arg = pc + 1; + OPCodeEnum opcode = (OPCodeEnum)*pc; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + JSAtom atom; + int32_t diff; + JSValue obj, val; + int ret, is_with; + + atom = get_u32(pc_arg); + diff = get_u32(pc_arg + 4); + is_with = pc_arg[8]; + (void)diff; /* branch handled by JIT emitter, not here */ + + sf->cur_pc = (uint8_t *)pc; + obj = sp[-1]; + ret = JS_HasProperty(ctx, obj, atom); + if (unlikely(ret < 0)) + goto jit_exception; + if (ret) { + if (is_with) { + ret = js_has_unscopable(ctx, obj, atom); + if (unlikely(ret < 0)) + goto jit_exception; + if (ret) + goto with_not_found; + } + switch (opcode) { + case OP_with_get_var: + val = JS_GetProperty(ctx, obj, atom); + if (unlikely(JS_IsException(val))) + goto jit_exception; + set_value(ctx, &sp[-1], val); + break; + case OP_with_put_var: + ret = JS_SetPropertyInternal(ctx, obj, atom, sp[-2], + JS_PROP_THROW_STRICT); + JS_FreeValue(ctx, sp[-1]); + sp -= 2; + if (unlikely(ret < 0)) + goto jit_exception; + break; + case OP_with_delete_var: + ret = 
JS_DeleteProperty(ctx, obj, atom, 0); + if (unlikely(ret < 0)) + goto jit_exception; + JS_FreeValue(ctx, sp[-1]); + sp[-1] = js_bool(ret); + break; + case OP_with_make_ref: + *sp++ = JS_AtomToValue(ctx, atom); + break; + case OP_with_get_ref: + val = JS_GetProperty(ctx, obj, atom); + if (unlikely(JS_IsException(val))) + goto jit_exception; + *sp++ = val; + break; + case OP_with_get_ref_undef: + val = JS_GetProperty(ctx, obj, atom); + if (unlikely(JS_IsException(val))) + goto jit_exception; + JS_FreeValue(ctx, sp[-1]); + sp[-1] = JS_UNDEFINED; + *sp++ = val; + break; + default: + break; + } + /* Branch taken — return 1 to JIT emitter */ + aux->sp = sp; + return 1; + } else { + with_not_found: + /* Not found in with scope — free obj, fall through */ + JS_FreeValue(ctx, sp[-1]); + sp--; + } + aux->sp = sp; + return 0; +jit_exception: + aux->sp = sp; + return -1; +} + +/* + * Universal single-opcode executor called from JIT-compiled code + * for opcodes that don't have specialized sljit emitters. + * + * pc points to the opcode byte in the bytecode stream. + * aux->sp is read/written to reflect stack changes. + * + * Returns 0 on success, -1 on exception. + */ +int qjs_jit_exec(JSContext *ctx, JitAux *aux, const uint8_t *pc) +{ + /* All opcodes are now handled by individual jit_op_* helpers. + * This function should no longer be called. 
*/ + (void)ctx; + (void)aux; + (void)pc; + abort(); + return -1; +} +#endif /* CONFIG_JIT */ -#define FUNC_RET_AWAIT 0 -#define FUNC_RET_YIELD 1 -#define FUNC_RET_YIELD_STAR 2 -#ifdef ENABLE_DUMPS // JS_DUMP_BYTECODE_* -static void dump_single_byte_code(JSContext *ctx, const uint8_t *pc, - JSFunctionBytecode *b, int start_pos); -static void print_func_name(JSFunctionBytecode *b); -#endif static bool needs_backtrace(JSValue exc) { @@ -17234,6 +20480,117 @@ static bool needs_backtrace(JSValue exc) return !find_own_property1(p, JS_ATOM_stack); } +#ifdef CONFIG_JIT +/* + * JIT exception handler: unwind the JS stack looking for a catch + * offset, then look up the corresponding native code address in the + * dispatch table. + * + * Returns the native address of the catch handler to jump to, + * or NULL if no handler was found (exception should propagate). + * + * On success: aux->sp is updated with exception value pushed on stack, + * rt->current_exception is cleared. + */ +void *jit_unwind_exception(JSContext *ctx, JitAux *aux) +{ + JSRuntime *rt = ctx->rt; + JSStackFrame *sf = (JSStackFrame *)aux->sf; + JSValue *sp = aux->sp; + JSValue *stack_buf = aux->stack_buf; + JitDispatchEntry *table = aux->dispatch_table; + int n_dispatch = aux->dispatch_count; + + /* Build backtrace */ + if (needs_backtrace(rt->current_exception) || + JS_IsUndefined(ctx->error_back_trace)) { + sf->cur_pc = NULL; + build_backtrace(ctx, rt->current_exception, JS_UNDEFINED, + NULL, 0, 0, 0); + } + + /* Uncatchable errors propagate directly */ + if (JS_IsUncatchableError(rt->current_exception)) { + aux->sp = sp; + return NULL; + } + + /* Stack unwinding — same logic as interpreter's exception: handler */ + while (sp > stack_buf) { + JSValue val = *--sp; + JS_FreeValue(ctx, val); + + if (JS_VALUE_GET_TAG(val) == JS_TAG_CATCH_OFFSET) { + int pos = JS_VALUE_GET_INT(val); + + if (pos == 0) { + /* Iterator cleanup (for-of) */ + JS_FreeValue(ctx, sp[-1]); /* drop the next method */ + sp--; + 
JS_IteratorClose(ctx, sp[-1], true); + continue; + } + + /* Found a catch handler — push exception on stack */ + *sp++ = rt->current_exception; + rt->current_exception = JS_UNINITIALIZED; + JS_FreeValueRT(rt, ctx->error_back_trace); + ctx->error_back_trace = JS_UNDEFINED; + + /* Look up native address for this bytecode position */ + for (int i = 0; i < n_dispatch; i++) { + if (table[i].bc_pos == pos) { + aux->sp = sp; + return table[i].native_addr; + } + } + + /* Position not in dispatch table — shouldn't happen. + * Put exception back and break to propagate. */ + rt->current_exception = *--sp; + break; + } + } + + aux->sp = sp; + return NULL; /* No handler found — propagate to caller */ +} + +/* + * OP_ret helper: pop return address from JS stack, look up the + * native code address in the dispatch table. + * + * Returns the native address to indirect-jump to, or NULL on error. + */ +void *qjs_jit_ret(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue op1; + int pos; + JitDispatchEntry *table = aux->dispatch_table; + int n_dispatch = aux->dispatch_count; + + op1 = *--sp; + if (unlikely(JS_VALUE_GET_TAG(op1) != JS_TAG_INT)) { + JS_ThrowInternalError(ctx, "jit_ret: bad return address tag"); + aux->sp = sp; + return NULL; + } + pos = JS_VALUE_GET_INT(op1); + aux->sp = sp; + + /* Look up native address */ + for (int i = 0; i < n_dispatch; i++) { + if (table[i].bc_pos == pos) { + return table[i].native_addr; + } + } + + JS_ThrowInternalError(ctx, "jit_ret: bc_pos=%d not in dispatch table", pos); + return NULL; +} +#endif /* CONFIG_JIT */ + /* argv[] is modified if (flags & JS_CALL_FLAG_COPY_ARGV) = 0. 
*/ static JSValue JS_CallInternal(JSContext *caller_ctx, JSValueConst func_obj, JSValueConst this_obj, JSValueConst new_target, @@ -17297,8 +20654,59 @@ static JSValue JS_CallInternal(JSContext *caller_ctx, JSValueConst func_obj, rt->current_stack_frame = sf; if (s->throw_flag) goto exception; - else - goto restart; +#ifdef CONFIG_JIT + if (b->jitcode) { + int bc_pos = (int)(pc - b->byte_code_buf); + void *resume_addr = NULL; + int i; + rt->jit_call_count++; + for (i = 0; i < b->jit_dispatch_count; i++) { + if (b->jit_dispatch_table[i].bc_pos == bc_pos) { + resume_addr = b->jit_dispatch_table[i].native_addr; + break; + } + } + if (resume_addr) { + JitAux aux; + int jit_result; + aux.stack_buf = stack_buf; + aux.var_buf = var_buf; + aux.arg_buf = arg_buf; + aux.sp = sp; + aux.var_refs = (struct JSVarRef **)var_refs; + aux.sf = (struct JSStackFrame *)sf; + aux.p = p; + aux.caller_ctx = caller_ctx; + aux.ret_val = JS_UNDEFINED; + aux.b = b; + aux.this_obj = s->this_val; + aux.new_target = JS_UNDEFINED; + aux.func_obj = sf->cur_func; + aux.argc = s->argc; + aux.argv = vc(sf->arg_buf); + aux.dispatch_table = b->jit_dispatch_table; + aux.dispatch_count = b->jit_dispatch_count; + aux.ic_cache = b->jit_ic_cache; + aux.ic_count = b->jit_ic_count; + aux.resume_native_addr = resume_addr; + aux.resume_bc_pc = NULL; + jit_result = ((JitFunc)b->jitcode)(ctx, &aux); + sp = aux.sp; + if (jit_result == 2) { + sf->cur_pc = (uint8_t *)aux.resume_bc_pc; + sf->cur_sp = sp; + ret_val = aux.ret_val; + rt->current_stack_frame = sf->prev_frame; + return ret_val; + } + if (jit_result != 0) + goto exception; + ret_val = aux.ret_val; + goto done; + } + } +#endif + goto restart; } else { goto not_a_function; } @@ -17364,6 +20772,47 @@ static JSValue JS_CallInternal(JSContext *caller_ctx, JSValueConst func_obj, rt->current_stack_frame = sf; ctx = b->realm; /* set the current realm */ +#ifdef CONFIG_JIT + if (b->jitcode) { + JitAux aux; + int jit_result; + ctx->rt->jit_call_count++; + 
aux.stack_buf = stack_buf; + aux.var_buf = var_buf; + aux.arg_buf = arg_buf; + aux.sp = sp; + aux.var_refs = (struct JSVarRef **)var_refs; + aux.sf = (struct JSStackFrame *)sf; + aux.p = p; + aux.caller_ctx = caller_ctx; + aux.ret_val = JS_UNDEFINED; + aux.b = b; + aux.this_obj = this_obj; + aux.new_target = new_target; + aux.func_obj = func_obj; + aux.argc = argc; + aux.argv = argv; + aux.dispatch_table = b->jit_dispatch_table; + aux.dispatch_count = b->jit_dispatch_count; + aux.ic_cache = b->jit_ic_cache; + aux.ic_count = b->jit_ic_count; + aux.resume_native_addr = NULL; + jit_result = ((JitFunc)b->jitcode)(ctx, &aux); + sp = aux.sp; + if (jit_result == 2) { + sf->cur_pc = (uint8_t *)aux.resume_bc_pc; + sf->cur_sp = sp; + ret_val = aux.ret_val; + rt->current_stack_frame = sf->prev_frame; + return ret_val; + } + if (jit_result != 0) + goto exception; + ret_val = aux.ret_val; + goto done; + } +#endif + #ifdef ENABLE_DUMPS // JS_DUMP_BYTECODE_STEP if (check_dump_flag(ctx->rt, JS_DUMP_BYTECODE_STEP)) print_func_name(b); @@ -35385,6 +38834,32 @@ static JSValue js_create_function(JSContext *ctx, JSFunctionDef *fd) list_del(&fd->link); } +#ifdef CONFIG_JIT + { + JitFunc jitcode = NULL; + void *jit_code_ptr = NULL; + JitDispatchEntry *dispatch_table = NULL; + int dispatch_count = 0; + PropIC *ic_cache = NULL; + int ic_count = 0; + js_sljit_compile(ctx, b->byte_code_buf, b->byte_code_len, + b->arg_count, b->var_count, b->stack_size, + &jitcode, &jit_code_ptr, + &dispatch_table, &dispatch_count, + &ic_cache, &ic_count); + b->jitcode = (void *)jitcode; + b->jit_code_ptr = jit_code_ptr; + b->jit_dispatch_table = dispatch_table; + b->jit_dispatch_count = dispatch_count; + b->jit_ic_cache = ic_cache; + b->jit_ic_count = ic_count; + if (jitcode) + ctx->rt->jit_compile_count++; + else + ctx->rt->jit_compile_fail_count++; + } +#endif + js_free(ctx, fd); return JS_MKPTR(JS_TAG_FUNCTION_BYTECODE, b); fail: @@ -35398,6 +38873,20 @@ static void free_function_bytecode(JSRuntime *rt, 
JSFunctionBytecode *b) { int i; +#ifdef CONFIG_JIT + if (b->jit_code_ptr) + js_sljit_free(b->jit_code_ptr); + if (b->jit_dispatch_table) + js_free_rt(rt, b->jit_dispatch_table); + if (b->jit_ic_cache) { + for (int i = 0; i < b->jit_ic_count; i++) { + if (b->jit_ic_cache[i].cached_shape) + js_free_shape(rt, (JSShape *)b->jit_ic_cache[i].cached_shape); + } + js_free_rt(rt, b->jit_ic_cache); + } +#endif + if (b->byte_code_buf) free_bytecode_atoms(rt, b->byte_code_buf, b->byte_code_len, true); diff --git a/quickjs.h b/quickjs.h index c86f0f062..9c1b8caf6 100644 --- a/quickjs.h +++ b/quickjs.h @@ -459,6 +459,9 @@ JS_EXTERN void JS_MarkValue(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); JS_EXTERN void JS_RunGC(JSRuntime *rt); JS_EXTERN bool JS_IsLiveObject(JSRuntime *rt, JSValueConst obj); +JS_EXTERN bool JS_IsJITCompiled(JSContext *ctx, JSValueConst val); +JS_EXTERN void JS_GetJITStats(JSRuntime *rt, uint32_t *compiled, + uint32_t *failed, uint32_t *calls); JS_EXTERN JSContext *JS_NewContext(JSRuntime *rt); JS_EXTERN void JS_FreeContext(JSContext *s); diff --git a/quickjs_sljit.c b/quickjs_sljit.c new file mode 100644 index 000000000..6a9c59023 --- /dev/null +++ b/quickjs_sljit.c @@ -0,0 +1,3278 @@ +/* + * QuickJS sljit JIT compiler + * + * Copyright (c) 2025 QuickJS-ng contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include +#include +#include + +#include "quickjs.h" +#include "cutils.h" +/* + * CONFIG_JIT must be defined before including quickjs-jit.h so that + * the JitAux / JitFunc definitions are visible. + */ +#ifndef CONFIG_JIT +#define CONFIG_JIT +#endif + +#include "quickjs-jit.h" + +/* ---- sljit all-in-one inclusion ---- */ +#define SLJIT_CONFIG_AUTO 1 +#define SLJIT_CONFIG_STATIC 1 +#include "sljit/sljit_src/sljitLir.c" + +/* ---- Opcode definitions ---- */ + +/* Build the opcode enum (non-temp opcodes only, matching quickjs.c) */ +typedef enum JitOPCodeEnum { +#define FMT(f) +#define DEF(id, size, n_pop, n_push, f) OP_ ## id, +#define def(id, size, n_pop, n_push, f) +#include "quickjs-opcode.h" +#undef def +#undef DEF +#undef FMT + OP_COUNT, + OP_TEMP_START = OP_nop + 1, + OP___dummy = OP_TEMP_START - 1, +#define FMT(f) +#define DEF(id, size, n_pop, n_push, f) +#define def(id, size, n_pop, n_push, f) OP_ ## id, +#include "quickjs-opcode.h" +#undef def +#undef DEF +#undef FMT + OP_TEMP_END, +} JitOPCodeEnum; + +/* Opcode sizes (bytes) for bytecode scanning */ +static const uint8_t jit_opcode_size[OP_COUNT] = { +#define FMT(f) +#define DEF(id, size, n_pop, n_push, f) size, +#define def(id, size, n_pop, n_push, f) +#include "quickjs-opcode.h" +#undef def +#undef DEF +#undef FMT +}; + +/* ---- Constants ---- */ + +#define JSV_SIZE ((sljit_sw)sizeof(JSValue)) + +/* For struct JSValue (non-NaN-boxing), field offsets within JSValue */ +#if !(defined(JS_NAN_BOXING) && 
JS_NAN_BOXING) && !defined(JS_CHECK_JSVALUE) +#define JSV_U_OFF 0 /* offsetof(JSValue, u) -- always 0 */ +#define JSV_TAG_OFF ((sljit_sw)offsetof(JSValue, tag)) +#endif + +/* ---- Register assignments ---- */ + +#define REG_CTX SLJIT_S0 /* JSContext *ctx (first arg, preserved) */ +#define REG_AUX SLJIT_S1 /* JitAux *aux (second arg, preserved) */ +#define REG_SP SLJIT_S2 /* JSValue *sp (stack pointer, updated) */ +#define REG_VBUF SLJIT_S3 /* JSValue *var_buf (preserved) */ +#define REG_ABUF SLJIT_S4 /* JSValue *arg_buf (preserved) */ + +/* ---- Deferred jump for forward branches ---- */ + +typedef struct JitJumpPatch { + struct sljit_jump *jump; + int target_pc; +} JitJumpPatch; + +/* ---- C helper functions called by JIT code via icall ---- */ + +/* + * Note: In quickjs-ng, JS_DupValue/JS_FreeValue are extern functions + * (not inlined), so we use them directly for refcount management. + * The ctx parameter is needed for JS_DupValue/JS_FreeValue. + */ + +/* Load variable/argument at index and push to stack (with refcount inc) */ +static void jit_helper_get_var(JSContext *ctx, JSValue *sp, + JSValue *buf, sljit_sw idx) +{ + *sp = JS_DupValue(ctx, buf[idx]); +} + +/* Pop from stack and store to variable/argument (frees old value) */ +static void jit_helper_put_var(JSContext *ctx, JSValue *sp, + JSValue *buf, sljit_sw idx) +{ + JS_FreeValue(ctx, buf[idx]); + buf[idx] = sp[-1]; +} + +/* Like put_var but keep value on stack (dup + store) */ +static void jit_helper_set_var(JSContext *ctx, JSValue *sp, + JSValue *buf, sljit_sw idx) +{ + JSValue v = sp[-1]; + JS_FreeValue(ctx, buf[idx]); + buf[idx] = JS_DupValue(ctx, v); +} + +/* Free the value at sp[-1] (caller must decrement sp) */ +static void jit_helper_drop(JSContext *ctx, JSValue *sp) +{ + JS_FreeValue(ctx, sp[-1]); +} + +/* Duplicate sp[-1] to sp[0] with refcount (caller must increment sp) */ +static void jit_helper_dup(JSContext *ctx, JSValue *sp) +{ + sp[0] = JS_DupValue(ctx, sp[-1]); +} + +/* Swap sp[-1] and 
sp[-2] */ +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static void jit_helper_swap(JSValue *sp) +{ + JSValue tmp = sp[-1]; + sp[-1] = sp[-2]; + sp[-2] = tmp; +} +#endif + +/* nip: a b -> b. Free a, keep b. */ +static void jit_helper_nip(JSContext *ctx, JSValue *sp) +{ + JS_FreeValue(ctx, sp[-2]); + sp[-2] = sp[-1]; +} + +/* Convert sp[-1] to boolean, free it. Returns 0 or 1. */ +static sljit_sw jit_helper_to_bool_free(JSContext *ctx, JSValue *sp) +{ + JSValue v = sp[-1]; + int tag = JS_VALUE_GET_TAG(v); + /* Fast path for INT and BOOL (tags 0 and 1) */ + if ((unsigned)tag <= JS_TAG_UNDEFINED) { + return JS_VALUE_GET_INT(v); + } + /* Slow path: convert and free */ + { + int res = JS_ToBool(ctx, v); + JS_FreeValue(ctx, v); + return res; + } +} + +/* Store return value from sp[-1] into aux, set aux->sp. */ +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static void jit_helper_return(JitAux *aux, JSValue *sp) +{ + aux->ret_val = sp[-1]; + aux->sp = sp - 1; +} +#endif + +/* + * Generator suspend helper: save sp and ret_val into aux. + * The caller (JIT code) has already set sf->cur_pc via the bytecode + * pointer baked into the generated code. cur_sp is set by the C caller + * after JIT returns. + */ +static void jit_helper_generator_suspend(JitAux *aux, JSValue *sp, + sljit_sw suspend_code, + const uint8_t *resume_pc) +{ + aux->sp = sp; + /* 0=FUNC_RET_AWAIT, 1=FUNC_RET_YIELD, 2=FUNC_RET_YIELD_STAR → js_int32(code) + * 3=initial_yield/return_async → JS_UNDEFINED */ + if (suspend_code <= 2) + aux->ret_val = JS_MKVAL(JS_TAG_INT, (int32_t)suspend_code); + else + aux->ret_val = JS_UNDEFINED; + aux->resume_native_addr = NULL; + aux->resume_bc_pc = resume_pc; +} + +/* Store JS_UNDEFINED as return value into aux, set aux->sp. 
*/ +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static void jit_helper_return_undef(JitAux *aux, JSValue *sp) +{ + aux->ret_val = JS_UNDEFINED; + aux->sp = sp; +} +#endif + +/* ---- JIT opcode helpers ---- */ + +/* ---- stack manipulation ---- */ + +static int jit_op_nip1(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JS_FreeValue(ctx, sp[-3]); + sp[-3] = sp[-2]; + sp[-2] = sp[-1]; + sp--; + aux->sp = sp; + return 0; +} + +static int jit_op_dup1(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp[0] = sp[-1]; + sp[-1] = JS_DupValue(ctx, sp[-2]); + sp++; + aux->sp = sp; + return 0; +} + +static int jit_op_dup2(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp[0] = JS_DupValue(ctx, sp[-2]); + sp[1] = JS_DupValue(ctx, sp[-1]); + sp += 2; + aux->sp = sp; + return 0; +} + +static int jit_op_dup3(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp[0] = JS_DupValue(ctx, sp[-3]); + sp[1] = JS_DupValue(ctx, sp[-2]); + sp[2] = JS_DupValue(ctx, sp[-1]); + sp += 3; + aux->sp = sp; + return 0; +} + +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static int jit_op_insert2(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp[0] = sp[-1]; + sp[-1] = sp[-2]; + sp[-2] = JS_DupValue(ctx, sp[0]); + sp++; + aux->sp = sp; + return 0; +} + +static int jit_op_insert3(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp[0] = sp[-1]; + sp[-1] = sp[-2]; + sp[-2] = sp[-3]; + sp[-3] = JS_DupValue(ctx, sp[0]); + sp++; + aux->sp = sp; + return 0; +} +#endif + +static int jit_op_insert4(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + sp[0] = sp[-1]; + sp[-1] = sp[-2]; + sp[-2] = sp[-3]; + sp[-3] = sp[-4]; + sp[-4] = JS_DupValue(ctx, sp[0]); + sp++; + aux->sp = sp; + return 0; +} + +static int jit_op_perm3(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-2]; + sp[-2] = sp[-3]; + sp[-3] = tmp; + aux->sp = sp; + return 
0; +} + +static int jit_op_perm4(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-2]; + sp[-2] = sp[-3]; + sp[-3] = sp[-4]; + sp[-4] = tmp; + aux->sp = sp; + return 0; +} + +static int jit_op_perm5(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-2]; + sp[-2] = sp[-3]; + sp[-3] = sp[-4]; + sp[-4] = sp[-5]; + sp[-5] = tmp; + aux->sp = sp; + return 0; +} + +static int jit_op_rot3l(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-3]; + sp[-3] = sp[-2]; + sp[-2] = sp[-1]; + sp[-1] = tmp; + aux->sp = sp; + return 0; +} + +static int jit_op_rot3r(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-1]; + sp[-1] = sp[-2]; + sp[-2] = sp[-3]; + sp[-3] = tmp; + aux->sp = sp; + return 0; +} + +static int jit_op_rot4l(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-4]; + sp[-4] = sp[-3]; + sp[-3] = sp[-2]; + sp[-2] = sp[-1]; + sp[-1] = tmp; + aux->sp = sp; + return 0; +} + +static int jit_op_rot5l(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp; + (void)ctx; + tmp = sp[-5]; + sp[-5] = sp[-4]; + sp[-4] = sp[-3]; + sp[-3] = sp[-2]; + sp[-2] = sp[-1]; + sp[-1] = tmp; + aux->sp = sp; + return 0; +} + +static int jit_op_swap2(JSContext *ctx, JitAux *aux) +{ + JSValue *sp = aux->sp; + JSValue tmp1, tmp2; + (void)ctx; + tmp1 = sp[-4]; + tmp2 = sp[-3]; + sp[-4] = sp[-2]; + sp[-3] = sp[-1]; + sp[-2] = tmp1; + sp[-1] = tmp2; + aux->sp = sp; + return 0; +} + +/* ---- Inline JSValue emit helpers ---- */ + +/* Push a known-constant JSValue onto the JIT stack (compile time). 
*/ +static void emit_push_const_jsv(struct sljit_compiler *C, JSValue v) +{ +#if defined(JS_NAN_BOXING) && JS_NAN_BOXING + /* JSValue = uint64_t, fits in one word on 64-bit, two on 32-bit */ + #if (SLJIT_WORD_SIZE == 64) + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), 0, + SLJIT_IMM, (sljit_sw)v); + #else + /* 32-bit NaN boxing: store low 32 bits then high 32 bits */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), 0, + SLJIT_IMM, (sljit_sw)(uint32_t)v); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), 4, + SLJIT_IMM, (sljit_sw)(uint32_t)(v >> 32)); + #endif +#elif defined(JS_CHECK_JSVALUE) + /* Pointer mode: JSValue is a pointer, fits in a word */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), 0, + SLJIT_IMM, (sljit_sw)v); +#else + /* Struct mode: JSValue = { JSValueUnion u; int64_t tag; } */ + /* Store union (word-sized, zero-extended int32) */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), JSV_U_OFF, + SLJIT_IMM, (sljit_sw)JS_VALUE_GET_INT(v)); + /* Store tag */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), JSV_TAG_OFF, + SLJIT_IMM, (sljit_sw)JS_VALUE_GET_TAG(v)); +#endif + sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); +} + +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) + +/* Emit icall to jit_helper_get_var(ctx, sp, buf_reg, idx), then sp++ */ +static void emit_get_var(struct sljit_compiler *C, + sljit_s32 buf_reg, sljit_sw idx) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, buf_reg, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, SLJIT_IMM, idx); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS4V(P, P, P, W), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_get_var)); + sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); +} + +/* Emit icall to jit_helper_put_var(ctx, sp, buf_reg, idx), then sp-- */ +static void emit_put_var(struct sljit_compiler *C, + sljit_s32 
buf_reg, sljit_sw idx) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, buf_reg, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, SLJIT_IMM, idx); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS4V(P, P, P, W), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_put_var)); + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); +} + +#endif /* NaN-boxing / CHECK mode */ + +/* Emit icall to jit_helper_set_var(ctx, sp, buf_reg, idx). No sp change. */ +static void emit_set_var(struct sljit_compiler *C, + sljit_s32 buf_reg, sljit_sw idx) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, buf_reg, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, SLJIT_IMM, idx); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS4V(P, P, P, W), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_set_var)); +} + +/* Emit drop: call jit_helper_drop(ctx, sp), then sp-- */ +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static void emit_drop(struct sljit_compiler *C) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_drop)); + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); +} +#endif + +/* Emit dup: call jit_helper_dup(ctx, sp), then sp++ */ +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static void emit_dup(struct sljit_compiler *C) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_dup)); + sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); +} + +/* Emit swap: call 
   jit_helper_swap(sp) */
static void emit_swap(struct sljit_compiler *C)
{
    /* swap only rearranges the stack — no ctx and no sp adjustment */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS1V(P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_swap));
}

/* Emit nip: call jit_helper_nip(ctx, sp), then sp-- */
static void emit_nip(struct sljit_compiler *C)
{
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_nip));
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
}
#endif

/*
 * ---- Inline fast-path emitters (struct mode only) ----
 *
 * These functions emit inline machine code for the common case (integer or
 * non-refcounted values) and fall back to calling the C helper for the
 * uncommon case. They are guarded by the struct-mode preprocessor check
 * because NaN-boxing and CHECK modes have different JSValue layouts.
 *
 * Pattern:
 *   1. Load tag from memory
 *   2. Check tag for fast-path condition
 *   3. Fast path: do operation inline (memcpy int32+tag, arithmetic, etc.)
 *   4. Jump over slow path
 *   5. Slow path: call existing C helper
 *   6. Done label
 */

#if !(defined(JS_NAN_BOXING) && JS_NAN_BOXING) && !defined(JS_CHECK_JSVALUE)

/*
 * emit_get_var_fast: Inline get_loc/get_arg for non-refcounted values.
 *
 * For non-refcounted types (tag >= 0: INT, BOOL, NULL, UNDEFINED, FLOAT64),
 * we can simply memcpy the 16-byte JSValue to the stack without calling
 * JS_DupValue. For refcounted types (tag < 0), fall back to helper.
 *
 * Generated code:
 *     R0 = buf[idx].tag
 *     if (R0 < 0) goto slow_path
 *     sp[0].u = buf[idx].u     (word-sized copy)
 *     sp[0].tag = R0
 *     sp += JSV_SIZE
 *     goto done
 *   slow_path:
 *     call jit_helper_get_var(ctx, sp, buf, idx)
 *     sp += JSV_SIZE
 *   done:
 */
static void emit_get_var_fast(struct sljit_compiler *C,
                              sljit_s32 buf_reg, sljit_sw idx)
{
    struct sljit_jump *slow_path, *done;
    sljit_sw byte_off = idx * JSV_SIZE;

    /* Load tag from buf[idx].tag into R0 */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(buf_reg), byte_off + JSV_TAG_OFF);

    /* If tag < 0 (refcounted), go slow path */
    slow_path = sljit_emit_cmp(C, SLJIT_SIG_LESS,
                               SLJIT_R0, 0, SLJIT_IMM, 0);

    /* Fast path: copy u (word at offset 0) and tag to sp[0] */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0,
                   SLJIT_MEM1(buf_reg), byte_off + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), JSV_U_OFF,
                   SLJIT_R1, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), JSV_TAG_OFF,
                   SLJIT_R0, 0);
    /* sp++ */
    sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    done = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path: call helper (it performs JS_DupValue) */
    sljit_set_label(slow_path, sljit_emit_label(C));
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, buf_reg, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, SLJIT_IMM, idx);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS4V(P, P, P, W),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_get_var));
    sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    /* Done */
    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_put_var_fast: Inline put_loc/put_arg when old value is non-refcounted.
 *
 * If the old value's tag >= 0, we can just overwrite without calling
 * JS_FreeValue. Otherwise, fall back to helper which frees the old value.
 *
 * Generated code:
 *     R0 = buf[idx].tag
 *     if (R0 < 0) goto slow_path
 *     buf[idx].u = sp[-1].u
 *     buf[idx].tag = sp[-1].tag
 *     sp -= JSV_SIZE
 *     goto done
 *   slow_path:
 *     call jit_helper_put_var(ctx, sp, buf, idx)
 *     sp -= JSV_SIZE
 *   done:
 */
static void emit_put_var_fast(struct sljit_compiler *C,
                              sljit_s32 buf_reg, sljit_sw idx)
{
    struct sljit_jump *slow_path, *done;
    sljit_sw byte_off = idx * JSV_SIZE;

    /* Load old tag */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(buf_reg), byte_off + JSV_TAG_OFF);

    /* If old tag < 0 (refcounted), need to free → slow path */
    slow_path = sljit_emit_cmp(C, SLJIT_SIG_LESS,
                               SLJIT_R0, 0, SLJIT_IMM, 0);

    /* Fast path: overwrite without free */
    /* Load sp[-1].u and sp[-1].tag */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);
    /* Store to buf[idx] */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(buf_reg), byte_off + JSV_U_OFF,
                   SLJIT_R0, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(buf_reg), byte_off + JSV_TAG_OFF,
                   SLJIT_R1, 0);
    /* sp-- */
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    done = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path: helper frees the old slot before storing */
    sljit_set_label(slow_path, sljit_emit_label(C));
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, buf_reg, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, SLJIT_IMM, idx);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS4V(P, P, P, W),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_put_var));
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_drop_fast: Inline drop for non-refcounted values.
 *
 * If sp[-1].tag >= 0, just decrement sp (no free needed).
 * Otherwise call jit_helper_drop + decrement.
 */
static void emit_drop_fast(struct sljit_compiler *C)
{
    struct sljit_jump *slow_path, *done;

    /* Load sp[-1].tag */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);

    /* If tag < 0 → slow path (need JS_FreeValue) */
    slow_path = sljit_emit_cmp(C, SLJIT_SIG_LESS,
                               SLJIT_R0, 0, SLJIT_IMM, 0);

    /* Fast path: just sp-- */
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    done = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path */
    sljit_set_label(slow_path, sljit_emit_label(C));
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_drop));
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_branch_fast: Inline if_false/if_true for JS_TAG_INT values.
 *
 * Both paths converge with the truthiness value in SLJIT_R0,
 * then a single comparison emits the branch jump.
 *
 * sense=0: branch if false (value == 0)
 * sense=1: branch if true (value != 0)
 */
static struct sljit_jump *emit_branch_fast(struct sljit_compiler *C, int sense)
{
    struct sljit_jump *not_int, *skip_slow;

    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);

    not_int = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                             SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Fast path: R0 = int32 value (truthiness), sp-- */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    skip_slow = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path: R0 = to_bool_free result (truthiness), sp-- */
    sljit_set_label(not_int, sljit_emit_label(C));
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_to_bool_free));
    /* icall returns in SLJIT_RETURN_REG (== SLJIT_R0) */
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    /* Converge: R0 has truthiness value from either path */
    sljit_set_label(skip_slow, sljit_emit_label(C));
    return sljit_emit_cmp(C,
                          sense ? SLJIT_NOT_EQUAL : SLJIT_EQUAL,
                          SLJIT_R0, 0, SLJIT_IMM, 0);
}

/*
 * emit_add_sub_fast: Inline integer add/sub with overflow detection.
 *
 * Fast path: if both sp[-1] and sp[-2] have tag == JS_TAG_INT,
 * perform the 32-bit operation inline with overflow check.
 * On overflow or non-integer operands, fall back to the C helper.
 *
 * On 64-bit: sign-extend both int32 to 64-bit, add/sub in 64-bit,
 * then check if result fits in int32 range.
 * On 32-bit: use SLJIT_ADD/SLJIT_SUB with SLJIT_SET_OVERFLOW.
 *
 * NOTE(review): SLJIT_WORD_SIZE looks like a project-local macro
 * (sljit itself exposes SLJIT_WORD_SHIFT / SLJIT_64BIT_ARCHITECTURE) —
 * confirm it is defined for every target.
 *
 * is_sub: 0 = add, 1 = sub
 */
static void emit_add_sub_fast(struct sljit_compiler *C,
                              int is_sub,
                              struct sljit_jump **exc_jumps,
                              int *n_exc_jumps)
{
    struct sljit_jump *not_int1, *not_int2, *done;
    void *helper_fn = is_sub ? (void *)qjs_jit_sub : (void *)qjs_jit_add;

    /* Load sp[-1].tag into R0 */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);

    /* Check sp[-1].tag == JS_TAG_INT */
    not_int1 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Load sp[-2].tag into R0 */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF);

    /* Check sp[-2].tag == JS_TAG_INT */
    not_int2 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Both are integers. Load int32 values. */
    /* R0 = sp[-2].u.int32 (left operand) */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF);
    /* R1 = sp[-1].u.int32 (right operand) */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);

#if (SLJIT_WORD_SIZE == 64)
    /* 64-bit platform: add/sub in 64-bit, then check int32 range.
     * Since both values are sign-extended 32-bit values in 64-bit registers,
     * the result of add/sub will be at most 33 bits. We check if the
     * result fits in int32 by sign-extending from 32 to 64 and comparing. */
    if (is_sub) {
        sljit_emit_op2(C, SLJIT_SUB, SLJIT_R0, 0,
                       SLJIT_R0, 0, SLJIT_R1, 0);
    } else {
        sljit_emit_op2(C, SLJIT_ADD, SLJIT_R0, 0,
                       SLJIT_R0, 0, SLJIT_R1, 0);
    }
    /* R1 = sign-extend R0 from 32 to 64 */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0, SLJIT_R0, 0);
    /* If R0 != R1, overflow occurred → slow path */
    {
        struct sljit_jump *overflow = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                                                     SLJIT_R0, 0, SLJIT_R1, 0);
        /* No overflow: store result to sp[-2].u.int32, tag stays JS_TAG_INT */
        sljit_emit_op1(C, SLJIT_MOV_S32,
                       SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF,
                       SLJIT_R0, 0);
        /* sp-- (pop right operand) */
        sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                       SLJIT_IMM, JSV_SIZE);
        done = sljit_emit_jump(C, SLJIT_JUMP);

        /* Overflow → slow path */
        sljit_set_label(overflow, sljit_emit_label(C));
    }
#else
    /* 32-bit platform: use SLJIT_SET_OVERFLOW */
    if (is_sub) {
        sljit_emit_op2(C, SLJIT_SUB | SLJIT_SET_OVERFLOW,
                       SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_R1, 0);
    } else {
        sljit_emit_op2(C, SLJIT_ADD | SLJIT_SET_OVERFLOW,
                       SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_R1, 0);
    }
    {
        struct sljit_jump *overflow = sljit_emit_jump(C, SLJIT_OVERFLOW);

        /* No overflow: store result */
        sljit_emit_op1(C, SLJIT_MOV,
                       SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF,
                       SLJIT_R0, 0);
        /* sp-- */
        sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                       SLJIT_IMM, JSV_SIZE);
        done = sljit_emit_jump(C, SLJIT_JUMP);

        sljit_set_label(overflow, sljit_emit_label(C));
    }
#endif

    /* Slow path: fall back to C helper */
    {
        struct sljit_label *slow_label = sljit_emit_label(C);
        sljit_set_label(not_int1, slow_label);
        sljit_set_label(not_int2, slow_label);
    }
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(helper_fn));
    {
        struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                                                     SLJIT_RETURN_REG, 0,
                                                     SLJIT_IMM, 0);
        exc_jumps[(*n_exc_jumps)++] = exc_jump;
    }
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_mul_fast: Inline integer multiplication.
 *
 * On 64-bit: multiply two sign-extended int32 values as 64-bit,
 * result always fits in 63 bits. Check if result fits int32.
 * Special case: 0 result needs sign check for -0.
 *
 * On 32-bit: just use the slow path (overflow detection for mul
 * is complex with SLJIT on 32-bit, and SLJIT_MUL has no SET_OVERFLOW
 * on some backends).
 */
static void emit_mul_fast(struct sljit_compiler *C,
                          struct sljit_jump **exc_jumps,
                          int *n_exc_jumps)
{
#if (SLJIT_WORD_SIZE == 64)
    struct sljit_jump *not_int1, *not_int2, *done;

    /* Check sp[-1].tag == JS_TAG_INT */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);
    not_int1 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Check sp[-2].tag == JS_TAG_INT */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF);
    not_int2 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Load int32 values (sign-extended to 64-bit) */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);

    /* 64-bit multiply: int32 * int32 → fits in int63, no overflow possible.
     * But we need to check if result fits in int32 range. */
    sljit_emit_op2(C, SLJIT_MUL, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_R1, 0);

    /* Check result fits int32: sign-extend from 32 and compare */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0, SLJIT_R0, 0);
    {
        struct sljit_jump *overflow = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                                                     SLJIT_R0, 0, SLJIT_R1, 0);

        /* Result fits int32. But check for -0: if result is 0 and either
         * operand was negative, result should be -0.0 (float). We handle
         * this by falling to slow path when result is 0 (rare case). */
        {
            struct sljit_jump *zero_check = sljit_emit_cmp(C, SLJIT_EQUAL,
                                                           SLJIT_R0, 0, SLJIT_IMM, 0);

            /* Non-zero result that fits int32: store and done */
            sljit_emit_op1(C, SLJIT_MOV_S32,
                           SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF,
                           SLJIT_R0, 0);
            sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                           SLJIT_IMM, JSV_SIZE);
            done = sljit_emit_jump(C, SLJIT_JUMP);

            /* Zero result → slow path (might need -0.0) */
            sljit_set_label(zero_check, sljit_emit_label(C));
        }
        sljit_set_label(overflow, sljit_emit_label(C));
    }

    /* Slow path */
    {
        struct sljit_label *slow_label = sljit_emit_label(C);
        sljit_set_label(not_int1, slow_label);
        sljit_set_label(not_int2, slow_label);
    }
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(qjs_jit_mul));
    {
        struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                                                     SLJIT_RETURN_REG, 0,
                                                     SLJIT_IMM, 0);
        exc_jumps[(*n_exc_jumps)++] = exc_jump;
    }
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    sljit_set_label(done, sljit_emit_label(C));

#else
    /* 32-bit: call C helper directly (no inline fast path for mul) */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(qjs_jit_mul));
    {
        struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                                                     SLJIT_RETURN_REG, 0,
                                                     SLJIT_IMM, 0);
        exc_jumps[(*n_exc_jumps)++] = exc_jump;
    }
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
#endif
}

/*
 * emit_swap_fast: Inline swap of sp[-1] and sp[-2].
 *
 * Pure memory swap — no tag checks or refcounting needed.
 * Uses R0-R3 as scratch to swap both 16-byte JSValues.
 */
static void emit_swap_fast(struct sljit_compiler *C)
{
    /* R0 = sp[-1].u, R1 = sp[-1].tag */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);
    /* R2 = sp[-2].u, R3 = sp[-2].tag */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF);
    /* sp[-1] = old sp[-2] */
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF,
                   SLJIT_R2, 0);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF,
                   SLJIT_R3, 0);
    /* sp[-2] = old sp[-1] */
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF,
                   SLJIT_R0, 0);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF,
                   SLJIT_R1, 0);
}

/*
 * emit_dup_fast: Inline dup for non-refcounted values.
 *
 * If sp[-1].tag >= 0 (non-refcounted), just copy the 16-byte JSValue.
 * Otherwise fall back to jit_helper_dup for JS_DupValue.
 */
static void emit_dup_fast(struct sljit_compiler *C)
{
    struct sljit_jump *slow_path, *done;

    /* R0 = sp[-1].tag */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);

    /* If tag < 0 (refcounted) → slow path */
    slow_path = sljit_emit_cmp(C, SLJIT_SIG_LESS,
                               SLJIT_R0, 0, SLJIT_IMM, 0);

    /* Fast path: copy sp[-1] to sp[0] */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), JSV_U_OFF,
                   SLJIT_R1, 0);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), JSV_TAG_OFF,
                   SLJIT_R0, 0);
    /* sp++ */
    sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    done = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path: call jit_helper_dup(ctx, sp) */
    sljit_set_label(slow_path, sljit_emit_label(C));
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_dup));
    sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_nip_fast: Inline nip (a b → b) for non-refcounted 'a'.
 *
 * If sp[-2].tag >= 0, no JS_FreeValue needed — just overwrite and sp--.
 * Otherwise fall back to jit_helper_nip.
 */
static void emit_nip_fast(struct sljit_compiler *C)
{
    struct sljit_jump *slow_path, *done;

    /* R0 = sp[-2].tag */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF);

    /* If tag < 0 (refcounted) → slow path (need JS_FreeValue) */
    slow_path = sljit_emit_cmp(C, SLJIT_SIG_LESS,
                               SLJIT_R0, 0, SLJIT_IMM, 0);

    /* Fast path: copy sp[-1] to sp[-2], then sp-- */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF,
                   SLJIT_R0, 0);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF,
                   SLJIT_R1, 0);
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    done = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path: call jit_helper_nip(ctx, sp) */
    sljit_set_label(slow_path, sljit_emit_label(C));
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_nip));
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);

    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_relational_fast: Inline integer comparison for <, >, <=, >=.
 *
 * Fast path: both operands are JS_TAG_INT → compare via sljit_emit_op2u
 * with SLJIT_SUB | SLJIT_SET_SIG_*, then sljit_emit_op_flags to produce
 * a 0/1 boolean. No branches needed for the comparison itself.
 * Slow path: call jit_op_relational(ctx, aux, opcode).
 *
 * SLJIT_SET uses paired flags:
 *   SLJIT_SET_SIG_LESS covers SLJIT_SIG_LESS and SLJIT_SIG_GREATER_EQUAL
 *   SLJIT_SET_SIG_GREATER covers SLJIT_SIG_GREATER and SLJIT_SIG_LESS_EQUAL
 */
static void emit_relational_fast(struct sljit_compiler *C,
                                 int opcode,
                                 struct sljit_jump **exc_jumps,
                                 int *n_exc_jumps)
{
    struct sljit_jump *not_int1, *not_int2, *done;
    sljit_s32 set_flag;  /* flag to pass to SLJIT_SUB | set_flag */
    sljit_s32 cond;      /* condition to read with sljit_emit_op_flags */

    /* Map opcode to SLJIT_SET_* and the condition to read.
     * SLJIT_SET uses paired flags, so the SET macro covers two conditions:
     *   SLJIT_SET_SIG_LESS = SLJIT_SET(SLJIT_SIG_LESS) → covers SIG_LESS & SIG_GREATER_EQUAL
     *   SLJIT_SET_SIG_GREATER = SLJIT_SET(SLJIT_SIG_GREATER) → covers SIG_GREATER & SIG_LESS_EQUAL
     */
    switch (opcode) {
    case OP_lt:
        set_flag = SLJIT_SET_SIG_LESS;
        cond = SLJIT_SIG_LESS;
        break;
    case OP_lte:
        set_flag = SLJIT_SET_SIG_GREATER;  /* covers SIG_LESS_EQUAL */
        cond = SLJIT_SIG_LESS_EQUAL;
        break;
    case OP_gt:
        set_flag = SLJIT_SET_SIG_GREATER;
        cond = SLJIT_SIG_GREATER;
        break;
    case OP_gte:
        set_flag = SLJIT_SET_SIG_LESS;     /* covers SIG_GREATER_EQUAL */
        cond = SLJIT_SIG_GREATER_EQUAL;
        break;
    default:
        /* defensive default — callers only pass the four opcodes above */
        set_flag = SLJIT_SET_SIG_LESS;
        cond = SLJIT_SIG_LESS;
        break;
    }

    /* Check sp[-1].tag == JS_TAG_INT */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);
    not_int1 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Check sp[-2].tag == JS_TAG_INT */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF);
    not_int2 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Both are integers: load int32 values (sign-extended to word size) */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);

    /* Branchless comparison: SUB sets flags, op_flags reads them.
     * sljit_emit_op2u discards the SUB result, only sets flags.
     * sljit_emit_op_flags writes 0 or 1 based on the condition. */
    sljit_emit_op2u(C, SLJIT_SUB | set_flag, SLJIT_R0, 0, SLJIT_R1, 0);
    sljit_emit_op_flags(C, SLJIT_MOV, SLJIT_R0, 0, cond);

    /* Store result as JS_BOOL to sp[-2] */
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF,
                   SLJIT_R0, 0);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF,
                   SLJIT_IMM, JS_TAG_BOOL);
    /* sp-- */
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    done = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path: call jit_op_relational(ctx, aux, opcode).
     * The helper adjusts sp itself, so we publish sp to aux before the
     * call and reload it afterwards. */
    {
        struct sljit_label *slow_label = sljit_emit_label(C);
        sljit_set_label(not_int1, slow_label);
        sljit_set_label(not_int2, slow_label);
    }
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp),
                   REG_SP, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, opcode);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, W),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_op_relational));
    sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0,
                   SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp));
    {
        struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                                                     SLJIT_RETURN_REG, 0,
                                                     SLJIT_IMM, 0);
        exc_jumps[(*n_exc_jumps)++] = exc_jump;
    }

    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_binary_logic_fast: Inline integer AND/OR/XOR.
 *
 * Fast path: both operands are JS_TAG_INT → perform bitwise op inline.
 * Slow path: call jit_op_binary_logic(ctx, aux, opcode).
 */
static void emit_binary_logic_fast(struct sljit_compiler *C,
                                   int opcode,
                                   struct sljit_jump **exc_jumps,
                                   int *n_exc_jumps)
{
    struct sljit_jump *not_int1, *not_int2, *done;
    sljit_s32 sljit_op;

    /* Map opcode to SLJIT operation */
    switch (opcode) {
    case OP_and: sljit_op = SLJIT_AND; break;
    case OP_or:  sljit_op = SLJIT_OR; break;
    case OP_xor: sljit_op = SLJIT_XOR; break;
    default:     sljit_op = SLJIT_AND; break;
    }

    /* Check sp[-1].tag == JS_TAG_INT */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);
    not_int1 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Check sp[-2].tag == JS_TAG_INT */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF);
    not_int2 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                              SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT);

    /* Both are integers: load int32 values */
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);

    /* Perform the bitwise operation */
    sljit_emit_op2(C, sljit_op, SLJIT_R0, 0,
                   SLJIT_R0, 0, SLJIT_R1, 0);

    /* Store result to sp[-2].u.int32 (tag stays JS_TAG_INT) */
    sljit_emit_op1(C, SLJIT_MOV_S32,
                   SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF,
                   SLJIT_R0, 0);
    /* sp-- */
    sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    done = sljit_emit_jump(C, SLJIT_JUMP);

    /* Slow path: call jit_op_binary_logic(ctx, aux, opcode);
     * publish sp to aux before the call, reload after. */
    {
        struct sljit_label *slow_label = sljit_emit_label(C);
        sljit_set_label(not_int1, slow_label);
        sljit_set_label(not_int2, slow_label);
    }
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp),
                   REG_SP, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, opcode);
    sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, W),
                     SLJIT_IMM, SLJIT_FUNC_ADDR(jit_op_binary_logic));
    sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0,
                   SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp));
    {
        struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL,
                                                     SLJIT_RETURN_REG, 0,
                                                     SLJIT_IMM, 0);
        exc_jumps[(*n_exc_jumps)++] = exc_jump;
    }

    sljit_set_label(done, sljit_emit_label(C));
}

/*
 * emit_return_fast: Inline return — store sp[-1] to aux->ret_val,
 * set aux->sp = sp - 1, return 0. No helper call needed.
 */
static void emit_return_fast(struct sljit_compiler *C)
{
    /* copy sp[-1] (u + tag) into aux->ret_val */
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF);
    sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0,
                   SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_AUX),
                   (sljit_sw)offsetof(JitAux, ret_val) + JSV_U_OFF,
                   SLJIT_R0, 0);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_AUX),
                   (sljit_sw)offsetof(JitAux, ret_val) + JSV_TAG_OFF,
                   SLJIT_R1, 0);
    /* aux->sp = sp - 1 (the returned value is consumed) */
    sljit_emit_op2(C, SLJIT_SUB, SLJIT_R0, 0, REG_SP, 0,
                   SLJIT_IMM, JSV_SIZE);
    sljit_emit_op1(C, SLJIT_MOV,
                   SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp),
                   SLJIT_R0, 0);
    sljit_emit_return(C, SLJIT_MOV, SLJIT_IMM, 0);
}

/*
 * emit_return_undef_fast: Inline return_undef — store JS_UNDEFINED
 * to aux->ret_val, set aux->sp = sp, return 0.
+ */ +static void emit_return_undef_fast(struct sljit_compiler *C) +{ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), + (sljit_sw)offsetof(JitAux, ret_val) + JSV_U_OFF, + SLJIT_IMM, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), + (sljit_sw)offsetof(JitAux, ret_val) + JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_UNDEFINED); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + sljit_emit_return(C, SLJIT_MOV, SLJIT_IMM, 0); +} + +static void emit_eq_fast(struct sljit_compiler *C, + int opcode, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *not_int1, *not_int2, *done; + sljit_s32 cond; + int is_eq = (opcode == OP_eq || opcode == OP_strict_eq); + int is_strict = (opcode == OP_strict_eq || opcode == OP_strict_neq); + cond = is_eq ? SLJIT_EQUAL : SLJIT_NOT_EQUAL; + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + not_int1 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF); + not_int2 = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT); + + /* Both int: compare values branchlessly */ + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + + sljit_emit_op2u(C, SLJIT_SUB | SLJIT_SET_Z, SLJIT_R0, 0, SLJIT_R1, 0); + sljit_emit_op_flags(C, SLJIT_MOV, SLJIT_R0, 0, cond); + + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_BOOL); + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); + done = sljit_emit_jump(C, SLJIT_JUMP); + + /* Not both int */ + { + struct sljit_label *not_int_label = sljit_emit_label(C); 
+ sljit_set_label(not_int1, not_int_label); + sljit_set_label(not_int2, not_int_label); + } + + if (!is_strict) { + /* Non-strict ==: inline nullish comparison fast path. + * If at least one operand is null/undefined and neither is refcounted, + * resolve inline. Otherwise call C helper. */ + struct sljit_jump *any_refcounted, *tag1_is_nullish, *tag2_is_nullish; + struct sljit_jump *neither_nullish, *done_nullish1, *done_nullish2; + + /* R0 = tag1, R1 = tag2 */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF); + + /* Either refcounted → slow */ + sljit_emit_op2(C, SLJIT_OR, SLJIT_R2, 0, SLJIT_R0, 0, SLJIT_R1, 0); + any_refcounted = sljit_emit_cmp(C, SLJIT_SIG_LESS, + SLJIT_R2, 0, SLJIT_IMM, 0); + + /* (tag-2) unsigned <= 1 detects null(2) and undefined(3) */ + sljit_emit_op2(C, SLJIT_SUB, SLJIT_R2, 0, SLJIT_R0, 0, SLJIT_IMM, JS_TAG_NULL); + tag1_is_nullish = sljit_emit_cmp(C, SLJIT_LESS_EQUAL, + SLJIT_R2, 0, SLJIT_IMM, 1); + + sljit_emit_op2(C, SLJIT_SUB, SLJIT_R2, 0, SLJIT_R1, 0, SLJIT_IMM, JS_TAG_NULL); + tag2_is_nullish = sljit_emit_cmp(C, SLJIT_LESS_EQUAL, + SLJIT_R2, 0, SLJIT_IMM, 1); + + /* Neither nullish, both non-refcounted (e.g. bool==int) → slow path */ + neither_nullish = sljit_emit_jump(C, SLJIT_JUMP); + + /* tag2 nullish, tag1 not → false for == (null != int/bool) */ + sljit_set_label(tag2_is_nullish, sljit_emit_label(C)); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF, + SLJIT_IMM, is_eq ? 0 : 1); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_BOOL); + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); + done_nullish1 = sljit_emit_jump(C, SLJIT_JUMP); + + /* tag1 nullish: result = (tag2 also nullish) ? 
eq : !eq */ + sljit_set_label(tag1_is_nullish, sljit_emit_label(C)); + sljit_emit_op2(C, SLJIT_SUB, SLJIT_R2, 0, SLJIT_R1, 0, SLJIT_IMM, JS_TAG_NULL); + sljit_emit_op2u(C, SLJIT_SUB | SLJIT_SET_LESS_EQUAL, + SLJIT_R2, 0, SLJIT_IMM, 1); + sljit_emit_op_flags(C, SLJIT_MOV, SLJIT_R0, 0, + is_eq ? SLJIT_LESS_EQUAL : SLJIT_GREATER); + + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_BOOL); + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); + done_nullish2 = sljit_emit_jump(C, SLJIT_JUMP); + + /* Slow path: refcounted or non-nullish non-int types */ + { + struct sljit_label *slow_label = sljit_emit_label(C); + sljit_set_label(any_refcounted, slow_label); + sljit_set_label(neither_nullish, slow_label); + } + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, opcode); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, W), + SLJIT_IMM, (sljit_sw)(void *)jit_op_eq); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + exc_jumps[(*n_exc_jumps)++] = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + + { + struct sljit_label *end_label = sljit_emit_label(C); + sljit_set_label(done, end_label); + sljit_set_label(done_nullish1, end_label); + sljit_set_label(done_nullish2, end_label); + } + } else { + /* Strict eq/neq: just call helper */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, opcode); + 
sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, W), + SLJIT_IMM, (sljit_sw)(void *)jit_op_strict_eq); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + exc_jumps[(*n_exc_jumps)++] = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + sljit_set_label(done, sljit_emit_label(C)); + } +} + +static void emit_inc_dec_fast(struct sljit_compiler *C, + int is_dec, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *not_int, *overflow, *done; + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + not_int = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT); + + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + + sljit_emit_op2(C, (is_dec ? SLJIT_SUB : SLJIT_ADD) | SLJIT_SET_OVERFLOW, + SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 1); + overflow = sljit_emit_jump(C, SLJIT_OVERFLOW); + + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF, + SLJIT_R0, 0); + done = sljit_emit_jump(C, SLJIT_JUMP); + + { + struct sljit_label *slow_label = sljit_emit_label(C); + sljit_set_label(not_int, slow_label); + sljit_set_label(overflow, slow_label); + } + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P), + SLJIT_IMM, (sljit_sw)(is_dec ? 
(void *)jit_op_dec : (void *)jit_op_inc)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + { + struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + exc_jumps[(*n_exc_jumps)++] = exc_jump; + } + + sljit_set_label(done, sljit_emit_label(C)); +} + +static void emit_push_this_fast(struct sljit_compiler *C, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *not_object, *done; + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_AUX), + (sljit_sw)offsetof(JitAux, this_obj) + JSV_TAG_OFF); + not_object = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_OBJECT); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_AUX), + (sljit_sw)offsetof(JitAux, this_obj) + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_OBJECT); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R2, 0, + SLJIT_MEM1(SLJIT_R1), 0); + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV32, + SLJIT_MEM1(SLJIT_R1), 0, + SLJIT_R2, 0); + + sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); + done = sljit_emit_jump(C, SLJIT_JUMP); + + sljit_set_label(not_object, sljit_emit_label(C)); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_op_push_this)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + { + struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_RETURN_REG, 0, + 
SLJIT_IMM, 0); + exc_jumps[(*n_exc_jumps)++] = exc_jump; + } + + sljit_set_label(done, sljit_emit_label(C)); +} + +/* ---- Inline logical-not fast path ---- + * If tag <= JS_TAG_UNDEFINED (INT, BOOL, NULL, UNDEFINED): + * result = !(int_val != 0) → written as js_bool(!val) + * Else: fall back to jit_op_lnot (handles strings, objects, floats) + * Never changes sp. + */ +static void emit_lnot_fast(struct sljit_compiler *C, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *slow_jump, *done; + + /* Load tag */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + + /* tag > JS_TAG_UNDEFINED (3) → slow path (unsigned compare: tags 0-3 are fast) */ + slow_jump = sljit_emit_cmp(C, SLJIT_GREATER, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_UNDEFINED); + + /* Fast path: load int32 value */ + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + + /* result = (val == 0) ? 1 : 0 i.e. !val */ + sljit_emit_op2u(C, SLJIT_SUB | SLJIT_SET_Z, + SLJIT_R0, 0, SLJIT_IMM, 0); + sljit_emit_op_flags(C, SLJIT_MOV, SLJIT_R0, 0, SLJIT_EQUAL); + + /* Write result as js_bool */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_BOOL); + done = sljit_emit_jump(C, SLJIT_JUMP); + + /* Slow path: call C helper */ + sljit_set_label(slow_jump, sljit_emit_label(C)); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_op_lnot)); + /* lnot always returns 0 — no exception check needed, + but sp is still in aux->sp, reload is needed since helper wrote it */ + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), 
(sljit_sw)offsetof(JitAux, sp)); + + sljit_set_label(done, sljit_emit_label(C)); + (void)exc_jumps; + (void)n_exc_jumps; +} + +/* ---- Inline to_propkey fast path ---- + * If tag is INT, STRING, or SYMBOL: no-op (already valid property key). + * Else: fall back to jit_op_to_propkey. + * ~90% of property keys are already one of these types. + */ +static void emit_to_propkey_fast(struct sljit_compiler *C, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *done_int, *done_str, *done_sym; + + /* Load tag */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + + /* Check INT (tag == 0) */ + done_int = sljit_emit_cmp(C, SLJIT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_INT); + /* Check STRING (tag == -7) */ + done_str = sljit_emit_cmp(C, SLJIT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_STRING); + /* Check SYMBOL (tag == -8) */ + done_sym = sljit_emit_cmp(C, SLJIT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_SYMBOL); + + /* Slow path: needs conversion */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_op_to_propkey)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + { + struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + exc_jumps[(*n_exc_jumps)++] = exc_jump; + } + + /* All fast paths land here (no-op) */ + { + struct sljit_label *done_label = sljit_emit_label(C); + sljit_set_label(done_int, done_label); + sljit_set_label(done_str, done_label); + sljit_set_label(done_sym, done_label); + } +} + +/* ---- Inline insert3 fast path ---- + * insert3: a b c → c a b c (dup TOS, insert copy 3 deep) + * sp[0] = sp[-1]; sp[-1] = sp[-2]; sp[-2] = sp[-3]; + * sp[-3] = 
JS_DupValue(ctx, sp[0]); sp++; + * + * Fast path: if TOS tag >= 0 (non-refcounted: INT, BOOL, NULL, UNDEFINED, FLOAT64), + * just copy memory. Otherwise inline refcount bump. + */ +static void emit_insert3_fast(struct sljit_compiler *C) +{ + struct sljit_jump *not_refcounted; + + /* Load TOS value (sp[-1]) into R0(val), R1(tag) */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + + /* Shift stack: sp[-1] = sp[-2] */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF, + SLJIT_R2, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF, + SLJIT_R3, 0); + + /* sp[-2] = sp[-3] */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(REG_SP), -3 * JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, + SLJIT_MEM1(REG_SP), -3 * JSV_SIZE + JSV_TAG_OFF); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF, + SLJIT_R2, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF, + SLJIT_R3, 0); + + /* sp[-3] = original TOS (R0/R1) — the dup target */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -3 * JSV_SIZE + JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -3 * JSV_SIZE + JSV_TAG_OFF, + SLJIT_R1, 0); + + /* Also write sp[0] = original TOS (this is the new slot pushed) */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), JSV_TAG_OFF, + SLJIT_R1, 0); + + /* JS_DupValue: if tag >= 0, non-refcounted → done. + if tag < 0, refcounted → bump ref_count at offset 0 of ptr. 
*/ + not_refcounted = sljit_emit_cmp(C, SLJIT_SIG_GREATER_EQUAL, + SLJIT_R1, 0, SLJIT_IMM, 0); + + /* Refcounted: R0 is the object pointer. ref_count is int32 at offset 0. */ + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R2, 0, + SLJIT_MEM1(SLJIT_R0), 0); + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV32, + SLJIT_MEM1(SLJIT_R0), 0, + SLJIT_R2, 0); + + sljit_set_label(not_refcounted, sljit_emit_label(C)); + + /* sp++ */ + sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0, SLJIT_IMM, JSV_SIZE); +} + +/* ---- Inline insert2 fast path ---- + * insert2: a b → b a b (dup TOS, insert copy 2 deep) + * sp[0] = sp[-1]; sp[-1] = sp[-2]; sp[-2] = JS_DupValue(ctx, sp[0]); sp++; + */ +static void emit_insert2_fast(struct sljit_compiler *C) +{ + struct sljit_jump *not_refcounted; + + /* Load TOS (sp[-1]) into R0(val), R1(tag) */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + + /* sp[-1] = sp[-2] */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF, + SLJIT_R2, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF, + SLJIT_R3, 0); + + /* sp[-2] = original TOS (R0/R1) — the dup target */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF, + SLJIT_R1, 0); + + /* sp[0] = original TOS (new pushed slot) */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), JSV_TAG_OFF, + SLJIT_R1, 0); + + /* JS_DupValue inline: tag < 0 → refcounted → bump ref_count */ + 
not_refcounted = sljit_emit_cmp(C, SLJIT_SIG_GREATER_EQUAL, + SLJIT_R1, 0, SLJIT_IMM, 0); + + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R2, 0, + SLJIT_MEM1(SLJIT_R0), 0); + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV32, + SLJIT_MEM1(SLJIT_R0), 0, + SLJIT_R2, 0); + + sljit_set_label(not_refcounted, sljit_emit_label(C)); + + /* sp++ */ + sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0, SLJIT_IMM, JSV_SIZE); +} + +#endif /* struct mode guards */ + +/* + * Dispatch macros: use fast-path versions in struct mode, + * fall back to original helpers in NaN-boxing/CHECK modes. + */ +#if !(defined(JS_NAN_BOXING) && JS_NAN_BOXING) && !defined(JS_CHECK_JSVALUE) +#define EMIT_GET_VAR(C, buf, idx) emit_get_var_fast(C, buf, idx) +#define EMIT_PUT_VAR(C, buf, idx) emit_put_var_fast(C, buf, idx) +#define EMIT_DROP(C) emit_drop_fast(C) +#define EMIT_BRANCH(C, sense) emit_branch_fast(C, sense) +#define EMIT_ADD(C, ej, nej) emit_add_sub_fast(C, 0, ej, nej) +#define EMIT_SUB(C, ej, nej) emit_add_sub_fast(C, 1, ej, nej) +#define EMIT_MUL(C, ej, nej) emit_mul_fast(C, ej, nej) +#define EMIT_SWAP(C) emit_swap_fast(C) +#define EMIT_DUP(C) emit_dup_fast(C) +#define EMIT_NIP(C) emit_nip_fast(C) +#define EMIT_RELATIONAL(C, op, ej, nej) emit_relational_fast(C, op, ej, nej) +#define EMIT_BINARY_LOGIC(C, op, ej, nej) emit_binary_logic_fast(C, op, ej, nej) +#define EMIT_RETURN(C) emit_return_fast(C) +#define EMIT_RETURN_UNDEF(C) emit_return_undef_fast(C) +#define EMIT_EQ(C, op, ej, nej) emit_eq_fast(C, op, ej, nej) +#define EMIT_INC_DEC(C, is_dec, ej, nej) emit_inc_dec_fast(C, is_dec, ej, nej) +#define EMIT_PUSH_THIS(C, ej, nej) emit_push_this_fast(C, ej, nej) +#define EMIT_LNOT(C, ej, nej) emit_lnot_fast(C, ej, nej) +#define EMIT_TO_PROPKEY(C, ej, nej) emit_to_propkey_fast(C, ej, nej) +#define EMIT_INSERT3(C) emit_insert3_fast(C) +#define EMIT_INSERT2(C) emit_insert2_fast(C) +#define EMIT_IS_UNDEF_OR_NULL(C, ej, nej) 
emit_is_undefined_or_null_fast(C, ej, nej) +#else +#define EMIT_GET_VAR(C, buf, idx) emit_get_var(C, buf, idx) +#define EMIT_PUT_VAR(C, buf, idx) emit_put_var(C, buf, idx) +#define EMIT_DROP(C) emit_drop(C) +#define EMIT_BRANCH(C, sense) emit_branch(C, sense) +#define EMIT_ADD(C, ej, nej) do { struct sljit_jump *_j = emit_arith(C, (void *)qjs_jit_add, NULL); (ej)[(*nej)++] = _j; } while(0) +#define EMIT_SUB(C, ej, nej) do { struct sljit_jump *_j = emit_arith(C, (void *)qjs_jit_sub, NULL); (ej)[(*nej)++] = _j; } while(0) +#define EMIT_MUL(C, ej, nej) do { struct sljit_jump *_j = emit_arith(C, (void *)qjs_jit_mul, NULL); (ej)[(*nej)++] = _j; } while(0) +#define EMIT_SWAP(C) emit_swap(C) +#define EMIT_DUP(C) emit_dup(C) +#define EMIT_NIP(C) emit_nip(C) +#define EMIT_RELATIONAL(C, op, ej, nej) do { (ej)[(*nej)++] = emit_op_call_int(C, (void *)jit_op_relational, op); } while(0) +#define EMIT_BINARY_LOGIC(C, op, ej, nej) do { (ej)[(*nej)++] = emit_op_call_int(C, (void *)jit_op_binary_logic, op); } while(0) +#define EMIT_RETURN(C) emit_return(C) +#define EMIT_RETURN_UNDEF(C) emit_return_undef(C) +#define EMIT_EQ(C, op, ej, nej) do { (ej)[(*nej)++] = emit_op_call_int(C, (void *)((op == OP_strict_eq || op == OP_strict_neq) ? (void *)jit_op_strict_eq : (void *)jit_op_eq), op); } while(0) +#define EMIT_INC_DEC(C, is_dec, ej, nej) do { (ej)[(*nej)++] = emit_op_call_2(C, (is_dec) ? 
(void *)jit_op_dec : (void *)jit_op_inc); } while(0) +#define EMIT_PUSH_THIS(C, ej, nej) do { (ej)[(*nej)++] = emit_op_call_2(C, (void *)jit_op_push_this); } while(0) +#define EMIT_LNOT(C, ej, nej) do { (ej)[(*nej)++] = emit_op_call_2(C, (void *)jit_op_lnot); } while(0) +#define EMIT_TO_PROPKEY(C, ej, nej) do { (ej)[(*nej)++] = emit_op_call_2(C, (void *)jit_op_to_propkey); } while(0) +#define EMIT_INSERT3(C) do { (void)emit_op_call_2(C, (void *)jit_op_insert3); } while(0) +#define EMIT_INSERT2(C) do { (void)emit_op_call_2(C, (void *)jit_op_insert2); } while(0) +#define EMIT_IS_UNDEF_OR_NULL(C, ej, nej) do { (ej)[(*nej)++] = emit_op_call_2(C, (void *)jit_op_is_undefined_or_null); } while(0) +#endif + +/* Emit a binary arithmetic op via extern helper (add/sub/mul). + * Calls qjs_jit_add/sub/mul(ctx, sp). Returns 0 ok, -1 exception. + * On success sp[-2] = result. Caller decrements sp. */ +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static struct sljit_jump *emit_arith(struct sljit_compiler *C, + void *helper_fn, + struct sljit_label **exc_label) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(helper_fn)); + /* Check return value: if non-zero, jump to exception */ + struct sljit_jump *exc_jump = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + /* sp-- on success */ + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); + return exc_jump; +} +#endif + +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static void emit_return(struct sljit_compiler *C) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_return)); + sljit_emit_return(C, SLJIT_MOV, SLJIT_IMM, 
0); +} + +/* Emit return_undef: call jit_helper_return_undef(aux, sp), return 0 */ +static void emit_return_undef(struct sljit_compiler *C) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2V(P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_return_undef)); + sljit_emit_return(C, SLJIT_MOV, SLJIT_IMM, 0); +} +#endif + +/* + * Emit generator suspend: save sp + suspend_code into aux, then return 2. + * suspend_code: 0=AWAIT, 1=YIELD, 2=YIELD_STAR, 3=UNDEFINED (initial_yield/return_async) + * resume_pc: bytecode pointer where the interpreter should resume. + */ +static void emit_generator_suspend(struct sljit_compiler *C, sljit_sw suspend_code, + const uint8_t *resume_pc) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, suspend_code); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, SLJIT_IMM, (sljit_sw)resume_pc); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS4V(P, P, W, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_generator_suspend)); + sljit_emit_return(C, SLJIT_MOV, SLJIT_IMM, 2); +} + +/* Emit if_false/if_true branch. Calls jit_helper_to_bool_free(ctx, sp), + * decrements sp, then branches based on result. 
+ * sense=0: branch if false (SLJIT_EQUAL to 0) + * sense=1: branch if true (SLJIT_NOT_EQUAL to 0) */ +#if (defined(JS_NAN_BOXING) && JS_NAN_BOXING) || defined(JS_CHECK_JSVALUE) +static struct sljit_jump *emit_branch(struct sljit_compiler *C, int sense) +{ + /* Call to_bool_free(ctx, sp) */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_SP, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_helper_to_bool_free)); + /* sp-- */ + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, JSV_SIZE); + /* Branch based on result */ + return sljit_emit_cmp(C, + sense ? SLJIT_NOT_EQUAL : SLJIT_EQUAL, + SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +} +#endif + +/* Emit icall to func(ctx, aux). Saves/reloads sp/vbuf/abuf. + * Returns exc_jump (return != 0 means exception). + * Note: helpers return int (32-bit). On x64 MSVC, `mov eax, -1` zero-extends + * to 0x00000000FFFFFFFF which is positive for SLJIT_SIG_LESS. Use NOT_EQUAL. */ +static struct sljit_jump *emit_op_call_2(struct sljit_compiler *C, void *func) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(W, P, P), SLJIT_IMM, SLJIT_FUNC_ADDR(func)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + return sljit_emit_cmp(C, SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +} + +/* Emit icall to func(ctx, aux, pc). pc is compile-time constant pointer. 
*/ +static struct sljit_jump *emit_op_call_pc(struct sljit_compiler *C, void *func, const uint8_t *pc) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, (sljit_sw)pc); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, P), SLJIT_IMM, SLJIT_FUNC_ADDR(func)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + return sljit_emit_cmp(C, SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +} + +/* Emit icall to func(ctx, aux, int_param). */ +static struct sljit_jump *emit_op_call_int(struct sljit_compiler *C, void *func, sljit_sw param) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, param); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, W), SLJIT_IMM, SLJIT_FUNC_ADDR(func)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + return sljit_emit_cmp(C, SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +} + +/* Emit icall to func(ctx, aux, pc, ic). pc and ic are compile-time constants. 
*/ +static struct sljit_jump *emit_op_call_pc_ic(struct sljit_compiler *C, void *func, + const uint8_t *pc, PropIC *ic) +{ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, (sljit_sw)pc); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R3, 0, SLJIT_IMM, (sljit_sw)ic); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS4(W, P, P, P, P), SLJIT_IMM, SLJIT_FUNC_ADDR(func)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + return sljit_emit_cmp(C, SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +} + +static void emit_is_undefined_or_null_fast(struct sljit_compiler *C, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *is_refcounted, *is_nullish, *done_false, *done_true; + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + + is_refcounted = sljit_emit_cmp(C, SLJIT_SIG_LESS, + SLJIT_R0, 0, SLJIT_IMM, 0); + + /* (tag - 2) unsigned <= 1 matches null(2) and undefined(3) */ + sljit_emit_op2(C, SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, + SLJIT_IMM, JS_TAG_NULL); + is_nullish = sljit_emit_cmp(C, SLJIT_LESS_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, 1); + + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF, + SLJIT_IMM, 0); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_BOOL); + done_false = sljit_emit_jump(C, SLJIT_JUMP); + + sljit_set_label(is_nullish, sljit_emit_label(C)); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF, + SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF, + SLJIT_IMM, JS_TAG_BOOL); + done_true = sljit_emit_jump(C, SLJIT_JUMP); + + { + struct sljit_label *slow_label = sljit_emit_label(C); + 
sljit_set_label(is_refcounted, slow_label); + exc_jumps[(*n_exc_jumps)++] = emit_op_call_2(C, (void *)jit_op_is_undefined_or_null); + } + + { + struct sljit_label *end_label = sljit_emit_label(C); + sljit_set_label(done_false, end_label); + sljit_set_label(done_true, end_label); + } +} + +/* + * Inline IC fast path for OP_get_field. + * On IC hit with obj ref_count > 1: fully inline (no function call). + * On miss/not-object/ref_count<=1: falls through to C helper. + * Uses only R0-R2 scratch registers (portable to all SLJIT targets). + */ +static void emit_get_field_ic_fast(struct sljit_compiler *C, + const uint8_t *pc, PropIC *ic, + const JitICLayout *layout, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *not_object, *ic_miss, *ref_low, *not_refcounted, *done; + struct sljit_label *slow_label; + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + not_object = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_OBJECT); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R0), layout->obj_shape_off); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM0(), (sljit_sw)&ic->cached_shape); + ic_miss = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, SLJIT_R1, 0, SLJIT_R2, 0); + + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R0), 0); + ref_low = sljit_emit_cmp(C, SLJIT_SIG_LESS_EQUAL, SLJIT_R1, 0, SLJIT_IMM, 1); + + sljit_emit_op2(C, SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV32, SLJIT_MEM1(SLJIT_R0), 0, SLJIT_R1, 0); + + sljit_emit_op1(C, SLJIT_MOV_U32, SLJIT_R1, 0, + SLJIT_MEM0(), (sljit_sw)&ic->cached_offset); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(SLJIT_R0), layout->obj_prop_off); + sljit_emit_op2(C, SLJIT_MUL, SLJIT_R1, 0, SLJIT_R1, 0, + SLJIT_IMM, layout->prop_size); + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R2, 0, 
SLJIT_R2, 0, SLJIT_R1, 0); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(SLJIT_R2), JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R2), JSV_TAG_OFF); + + not_refcounted = sljit_emit_cmp(C, SLJIT_SIG_GREATER_EQUAL, + SLJIT_R1, 0, SLJIT_IMM, 0); + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0); + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV32, SLJIT_MEM1(SLJIT_R0), 0, SLJIT_R2, 0); + sljit_set_label(not_refcounted, sljit_emit_label(C)); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF, + SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF, + SLJIT_R1, 0); + + done = sljit_emit_jump(C, SLJIT_JUMP); + + slow_label = sljit_emit_label(C); + sljit_set_label(not_object, slow_label); + sljit_set_label(ic_miss, slow_label); + sljit_set_label(ref_low, slow_label); + exc_jumps[(*n_exc_jumps)++] = emit_op_call_pc_ic(C, + (void *)jit_op_get_field_ic, pc, ic); + + sljit_set_label(done, sljit_emit_label(C)); +} + +/* + * Inline IC fast path for OP_get_field2. + * Pushes value WITHOUT consuming the object (sp grows by 1). + * No ref_count check needed for old obj since it's not freed. 
+ */ +static void emit_get_field2_ic_fast(struct sljit_compiler *C, + const uint8_t *pc, PropIC *ic, + const JitICLayout *layout, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *not_object, *ic_miss, *not_refcounted, *done; + struct sljit_label *slow_label; + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + not_object = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_OBJECT); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R0), layout->obj_shape_off); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM0(), (sljit_sw)&ic->cached_shape); + ic_miss = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, SLJIT_R1, 0, SLJIT_R2, 0); + + sljit_emit_op1(C, SLJIT_MOV_U32, SLJIT_R1, 0, + SLJIT_MEM0(), (sljit_sw)&ic->cached_offset); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(SLJIT_R0), layout->obj_prop_off); + sljit_emit_op2(C, SLJIT_MUL, SLJIT_R1, 0, SLJIT_R1, 0, + SLJIT_IMM, layout->prop_size); + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_R1, 0); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(SLJIT_R2), JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R2), JSV_TAG_OFF); + + not_refcounted = sljit_emit_cmp(C, SLJIT_SIG_GREATER_EQUAL, + SLJIT_R1, 0, SLJIT_IMM, 0); + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0); + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV32, SLJIT_MEM1(SLJIT_R0), 0, SLJIT_R2, 0); + sljit_set_label(not_refcounted, sljit_emit_label(C)); + + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), JSV_U_OFF, SLJIT_R0, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_SP), JSV_TAG_OFF, SLJIT_R1, 0); + sljit_emit_op2(C, SLJIT_ADD, REG_SP, 0, REG_SP, 0, SLJIT_IMM, JSV_SIZE); + + done = sljit_emit_jump(C, SLJIT_JUMP); + + slow_label = 
sljit_emit_label(C); + sljit_set_label(not_object, slow_label); + sljit_set_label(ic_miss, slow_label); + exc_jumps[(*n_exc_jumps)++] = emit_op_call_pc_ic(C, + (void *)jit_op_get_field2_ic, pc, ic); + + sljit_set_label(done, sljit_emit_label(C)); +} + +/* + * Inline IC fast path for OP_put_field. + * Fully inlines the IC-hit path when: + * 1. sp[-2] is an object with matching shape + * 2. Old property value is non-refcounted (tag >= 0) + * 3. New value (sp[-1]) is non-refcounted (tag >= 0) + * When both old and new are non-refcounted (int/bool/null/undefined), + * the entire put_field completes with zero function calls. + * Otherwise falls to the C helper for ref-counting. + * Uses only R0-R2 (portable to all SLJIT targets). + */ +static void emit_put_field_ic_fast(struct sljit_compiler *C, + const uint8_t *pc, PropIC *ic, + const JitICLayout *layout, + struct sljit_jump **exc_jumps, + int *n_exc_jumps) +{ + struct sljit_jump *not_object, *ic_miss, *old_refcounted, *new_refcounted; + struct sljit_jump *obj_ref_low, *done; + struct sljit_label *slow_label; + + /* 1. Check sp[-2].tag == JS_TAG_OBJECT */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_TAG_OFF); + not_object = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, + SLJIT_R0, 0, SLJIT_IMM, JS_TAG_OBJECT); + + /* 2. Load obj ptr, check shape match */ + /* R0 = obj ptr */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_SP), -2 * JSV_SIZE + JSV_U_OFF); + /* R1 = obj->shape */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R0), layout->obj_shape_off); + /* R2 = ic->cached_shape */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM0(), (sljit_sw)&ic->cached_shape); + ic_miss = sljit_emit_cmp(C, SLJIT_NOT_EQUAL, SLJIT_R1, 0, SLJIT_R2, 0); + + /* 3. 
Compute property address: R1 = &prop[cached_offset] */ + /* R2 = ic->cached_offset */ + sljit_emit_op1(C, SLJIT_MOV_U32, SLJIT_R2, 0, + SLJIT_MEM0(), (sljit_sw)&ic->cached_offset); + /* R1 = obj->prop */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R0), layout->obj_prop_off); + /* R2 = cached_offset * prop_size */ + sljit_emit_op2(C, SLJIT_MUL, SLJIT_R2, 0, SLJIT_R2, 0, + SLJIT_IMM, layout->prop_size); + /* R1 = &prop[cached_offset] (property address) */ + sljit_emit_op2(C, SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_R2, 0); + + /* 4. Check old property value tag: if refcounted (tag < 0), go to helper */ + /* R2 = old_val.tag */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(SLJIT_R1), JSV_TAG_OFF); + old_refcounted = sljit_emit_cmp(C, SLJIT_SIG_LESS, + SLJIT_R2, 0, SLJIT_IMM, 0); + + /* 5. Check new value (sp[-1]) tag: if refcounted, go to helper */ + /* R2 = new_val.tag */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_TAG_OFF); + new_refcounted = sljit_emit_cmp(C, SLJIT_SIG_LESS, + SLJIT_R2, 0, SLJIT_IMM, 0); + + /* 6. FAST PATH: both old and new are non-refcounted. + * Store new value (u + tag) into property slot. */ + /* R2 already has new_val.tag, load new_val.u */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), JSV_TAG_OFF, + SLJIT_R2, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, + SLJIT_MEM1(REG_SP), -JSV_SIZE + JSV_U_OFF); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), JSV_U_OFF, + SLJIT_R2, 0); + + /* 7. Decrement obj->ref_count (R0 = obj ptr). + * If ref_count would reach 0, go to slow path. + * (We already checked shape match, so obj is valid.) */ + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_R1, 0, + SLJIT_MEM1(SLJIT_R0), 0); + obj_ref_low = sljit_emit_cmp(C, SLJIT_SIG_LESS_EQUAL, + SLJIT_R1, 0, SLJIT_IMM, 1); + sljit_emit_op2(C, SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); + sljit_emit_op1(C, SLJIT_MOV32, SLJIT_MEM1(SLJIT_R0), 0, SLJIT_R1, 0); + + /* 8. 
sp -= 2 */ + sljit_emit_op2(C, SLJIT_SUB, REG_SP, 0, REG_SP, 0, + SLJIT_IMM, 2 * JSV_SIZE); + done = sljit_emit_jump(C, SLJIT_JUMP); + + /* SLOW PATH: call jit_op_put_field_ic_hit for IC hit but refcounted values, + * or jit_op_put_field_ic for IC miss / not object / obj about to be freed */ + { + struct sljit_jump *done2; + struct sljit_label *hit_slow; + + /* IC hit but needs ref-counting: call jit_op_put_field_ic_hit(ctx, aux, ic) */ + hit_slow = sljit_emit_label(C); + sljit_set_label(old_refcounted, hit_slow); + sljit_set_label(new_refcounted, hit_slow); + sljit_set_label(obj_ref_low, hit_slow); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_MEM1(REG_AUX), + (sljit_sw)offsetof(JitAux, sp), REG_SP, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, (sljit_sw)ic); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_op_put_field_ic_hit)); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + done2 = sljit_emit_jump(C, SLJIT_JUMP); + + /* IC miss / not object: call full jit_op_put_field_ic(ctx, aux, pc, ic) */ + slow_label = sljit_emit_label(C); + sljit_set_label(not_object, slow_label); + sljit_set_label(ic_miss, slow_label); + exc_jumps[(*n_exc_jumps)++] = emit_op_call_pc_ic(C, + (void *)jit_op_put_field_ic, pc, ic); + + { + struct sljit_label *end_label = sljit_emit_label(C); + sljit_set_label(done, end_label); + sljit_set_label(done2, end_label); + } + } +} + + +/* ---- Bytecode analysis ---- */ + +/* Check if a function's bytecode can be JIT compiled (blacklist approach). + * Returns 1 if no blacklisted opcodes are found, 0 otherwise. + * If first_unsupported is non-NULL, stores the first unsupported opcode. 
*/ +static int can_jit_compile(const uint8_t *bc, int bc_len, + int *first_unsupported) +{ + int pos = 0; + + if (first_unsupported) + *first_unsupported = -1; + + while (pos < bc_len) { + uint8_t op = bc[pos]; + if (op >= OP_COUNT) { + if (first_unsupported) *first_unsupported = op; + return 0; /* temporary or invalid opcode */ + } + + switch (op) { + /* No blacklisted opcodes currently */ + default: + break; + } + + pos += jit_opcode_size[op]; + } + return 1; +} + +/* ---- Main JIT compilation ---- */ + +void js_sljit_compile(JSContext *ctx, + uint8_t *byte_code_buf, int byte_code_len, + int arg_count, int var_count, int stack_size, + JitFunc *out_jitcode, void **out_jit_code_ptr, + JitDispatchEntry **out_dispatch_table, + int *out_dispatch_count, + PropIC **out_ic_cache, + int *out_ic_count) +{ + struct sljit_compiler *C; + struct sljit_label **labels = NULL; + uint8_t *is_target = NULL; + JitJumpPatch *deferred = NULL; + struct sljit_jump **exc_jumps = NULL; + int n_deferred = 0, max_deferred; + int n_exc_jumps = 0, max_exc_jumps; + int pos, bc_len; + const uint8_t *bc; + void *code; + JitDispatchEntry *dispatch_table = NULL; + int n_dispatch = 0, max_dispatch; + PropIC *ic_array = NULL; + int ic_total = 0, ic_idx = 0; + JitICLayout ic_layout; + + *out_jitcode = NULL; + *out_jit_code_ptr = NULL; + *out_dispatch_table = NULL; + *out_dispatch_count = 0; + *out_ic_cache = NULL; + *out_ic_count = 0; + + bc = byte_code_buf; + bc_len = byte_code_len; + if (bc_len <= 0) + return; + + /* Phase 0: check if function can be JIT compiled */ + { + int unsupported_op = -1; + if (!can_jit_compile(bc, bc_len, &unsupported_op)) { + return; + } + } + + jit_get_ic_layout(&ic_layout); + + /* Phase 1: identify branch targets */ + is_target = calloc(bc_len, 1); + if (!is_target) + return; + + for (pos = 0; pos < bc_len; ) { + uint8_t op = bc[pos]; + int target; + + switch (op) { + case OP_if_false: + case OP_if_true: + case OP_goto: + target = pos + 1 + (int32_t)get_u32(bc + pos + 
1); + if (target >= 0 && target < bc_len) + is_target[target] = 1; + break; + case OP_if_false8: + case OP_if_true8: + case OP_goto8: + target = pos + 1 + (int8_t)bc[pos + 1]; + if (target >= 0 && target < bc_len) + is_target[target] = 1; + break; + case OP_goto16: + target = pos + 1 + (int16_t)get_u16(bc + pos + 1); + if (target >= 0 && target < bc_len) + is_target[target] = 1; + break; + case OP_catch: + /* Catch handler target */ + target = (int)((pos + 1) + (int32_t)get_u32(bc + pos + 1)); + if (target >= 0 && target < bc_len) + is_target[target] = 1; + break; + case OP_gosub: + /* Finally block target */ + target = (int)((pos + 1) + (int32_t)get_u32(bc + pos + 1)); + if (target >= 0 && target < bc_len) + is_target[target] = 1; + /* Gosub return position (after this instruction) */ + if (pos + 5 < bc_len) + is_target[pos + 5] = 1; + break; + case OP_with_get_var: + case OP_with_put_var: + case OP_with_delete_var: + case OP_with_make_ref: + case OP_with_get_ref: + case OP_with_get_ref_undef: + /* with_* branch target: pos + 5 + diff */ + { + int32_t diff = (int32_t)get_u32(bc + pos + 5); + target = pos + 5 + diff; + if (target >= 0 && target < bc_len) + is_target[target] = 1; + } + break; + } + + if (op == OP_initial_yield || op == OP_yield || + op == OP_yield_star || op == OP_async_yield_star || + op == OP_await || op == OP_return_async) { + int resume_pos = pos + jit_opcode_size[op]; + if (resume_pos >= 0 && resume_pos < bc_len) + is_target[resume_pos] = 1; + } + + pos += jit_opcode_size[op]; + } + + /* Count dispatch table entries (catch targets + gosub return sites) */ + max_dispatch = 0; + for (pos = 0; pos < bc_len; ) { + uint8_t op = bc[pos]; + if (op == OP_catch) + max_dispatch++; + if (op == OP_gosub) + max_dispatch++; /* return site */ + if (op == OP_initial_yield || op == OP_yield || + op == OP_yield_star || op == OP_async_yield_star || + op == OP_await || op == OP_return_async) + max_dispatch++; + pos += jit_opcode_size[op]; + } + + /* Allocate 
dispatch table */ + if (max_dispatch > 0) { + dispatch_table = js_mallocz(ctx, max_dispatch * sizeof(JitDispatchEntry)); + if (!dispatch_table) + goto fail; + } + + /* Count and allocate inline cache entries for property access */ + for (pos = 0; pos < bc_len; ) { + uint8_t op = bc[pos]; + if (op == OP_get_field || op == OP_get_field2 || op == OP_put_field) + ic_total++; + pos += jit_opcode_size[op]; + } + if (ic_total > 0) { + ic_array = js_mallocz(ctx, ic_total * sizeof(PropIC)); + if (!ic_array) + goto fail; + } + + /* Fill dispatch table entries */ + for (pos = 0; pos < bc_len; ) { + uint8_t op = bc[pos]; + if (op == OP_catch) { + int target = (int)((pos + 1) + (int32_t)get_u32(bc + pos + 1)); + dispatch_table[n_dispatch].bc_pos = target; + dispatch_table[n_dispatch].native_addr = NULL; + n_dispatch++; + } + if (op == OP_gosub) { + dispatch_table[n_dispatch].bc_pos = pos + 5; + dispatch_table[n_dispatch].native_addr = NULL; + n_dispatch++; + } + if (op == OP_initial_yield || op == OP_yield || + op == OP_yield_star || op == OP_async_yield_star || + op == OP_await || op == OP_return_async) { + int resume_pos = pos + jit_opcode_size[op]; + dispatch_table[n_dispatch].bc_pos = resume_pos; + dispatch_table[n_dispatch].native_addr = NULL; + n_dispatch++; + } + pos += jit_opcode_size[op]; + } + + /* Allocate label array */ + labels = calloc(bc_len, sizeof(struct sljit_label *)); + if (!labels) + goto fail; + + /* Allocate deferred jump list (max = number of branch instructions) */ + max_deferred = bc_len; /* overestimate */ + deferred = malloc(max_deferred * sizeof(JitJumpPatch)); + if (!deferred) + goto fail; + + /* Allocate exception jump list */ + max_exc_jumps = bc_len; + exc_jumps = malloc(max_exc_jumps * sizeof(struct sljit_jump *)); + if (!exc_jumps) + goto fail; + + /* Phase 2: create sljit compiler and emit code */ + C = sljit_create_compiler(NULL); + if (!C) + goto fail; + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (getenv("QJS_JIT_VERBOSE")) + 
sljit_compiler_verbose(C, stdout); +#endif + + /* Function entry: int jit_func(JSContext *ctx, JitAux *aux) + * S0 = ctx, S1 = aux (set by emit_enter from args) + * We use 5 scratch regs (R0-R4) and 5 saved regs (S0-S4) */ + sljit_emit_enter(C, 0, + SLJIT_ARGS2(W, P, P), + 5 /* scratches */, 5 /* saveds */, + 0 /* local_size */); + + /* Load sp, var_buf, arg_buf from aux struct */ + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + sljit_emit_op1(C, SLJIT_MOV, REG_VBUF, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, var_buf)); + sljit_emit_op1(C, SLJIT_MOV, REG_ABUF, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, arg_buf)); + + /* Generator resume: if resume_native_addr != NULL, jump to it */ + { + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, + SLJIT_MEM1(REG_AUX), + (sljit_sw)offsetof(JitAux, resume_native_addr)); + struct sljit_jump *not_resume = sljit_emit_cmp(C, SLJIT_EQUAL, + SLJIT_R0, 0, + SLJIT_IMM, 0); + sljit_emit_ijump(C, SLJIT_JUMP, SLJIT_R0, 0); + sljit_set_label(not_resume, sljit_emit_label(C)); + } + + /* Phase 3: emit code for each opcode */ + for (pos = 0; pos < bc_len; ) { + uint8_t op = bc[pos]; + int opsize = jit_opcode_size[op]; + + /* Emit label at branch targets */ + if (is_target[pos]) + labels[pos] = sljit_emit_label(C); + + switch (op) { + + /* ---- Push constants ---- */ + + case OP_push_i32: + emit_push_const_jsv(C, JS_MKVAL(JS_TAG_INT, (int32_t)get_u32(bc + pos + 1))); + break; + + case OP_push_minus1: + case OP_push_0: + case OP_push_1: + case OP_push_2: + case OP_push_3: + case OP_push_4: + case OP_push_5: + case OP_push_6: + case OP_push_7: + emit_push_const_jsv(C, JS_MKVAL(JS_TAG_INT, op - OP_push_0)); + break; + + case OP_push_i8: + emit_push_const_jsv(C, JS_MKVAL(JS_TAG_INT, (int8_t)bc[pos + 1])); + break; + + case OP_push_i16: + emit_push_const_jsv(C, JS_MKVAL(JS_TAG_INT, (int16_t)get_u16(bc + pos + 1))); + break; + + case OP_undefined: + emit_push_const_jsv(C, 
JS_UNDEFINED); + break; + + case OP_null: + emit_push_const_jsv(C, JS_NULL); + break; + + case OP_push_false: + emit_push_const_jsv(C, JS_FALSE); + break; + + case OP_push_true: + emit_push_const_jsv(C, JS_TRUE); + break; + + /* ---- Stack manipulation ---- */ + + case OP_drop: + EMIT_DROP(C); + break; + + case OP_dup: + EMIT_DUP(C); + break; + + case OP_swap: + EMIT_SWAP(C); + break; + + case OP_nip: + EMIT_NIP(C); + break; + + /* ---- Local variable access (3-byte: get_u16 index) ---- */ + + case OP_get_loc: + EMIT_GET_VAR(C, REG_VBUF, (sljit_sw)get_u16(bc + pos + 1)); + break; + case OP_put_loc: + EMIT_PUT_VAR(C, REG_VBUF, (sljit_sw)get_u16(bc + pos + 1)); + break; + case OP_set_loc: + emit_set_var(C, REG_VBUF, (sljit_sw)get_u16(bc + pos + 1)); + break; + + /* ---- Local variable access (2-byte: loc8 index) ---- */ + + case OP_get_loc8: + EMIT_GET_VAR(C, REG_VBUF, (sljit_sw)bc[pos + 1]); + break; + case OP_put_loc8: + EMIT_PUT_VAR(C, REG_VBUF, (sljit_sw)bc[pos + 1]); + break; + case OP_set_loc8: + emit_set_var(C, REG_VBUF, (sljit_sw)bc[pos + 1]); + break; + + /* ---- Local variable access (1-byte short forms) ---- */ + + case OP_get_loc0: EMIT_GET_VAR(C, REG_VBUF, 0); break; + case OP_get_loc1: EMIT_GET_VAR(C, REG_VBUF, 1); break; + case OP_get_loc2: EMIT_GET_VAR(C, REG_VBUF, 2); break; + case OP_get_loc3: EMIT_GET_VAR(C, REG_VBUF, 3); break; + + case OP_put_loc0: EMIT_PUT_VAR(C, REG_VBUF, 0); break; + case OP_put_loc1: EMIT_PUT_VAR(C, REG_VBUF, 1); break; + case OP_put_loc2: EMIT_PUT_VAR(C, REG_VBUF, 2); break; + case OP_put_loc3: EMIT_PUT_VAR(C, REG_VBUF, 3); break; + + case OP_set_loc0: emit_set_var(C, REG_VBUF, 0); break; + case OP_set_loc1: emit_set_var(C, REG_VBUF, 1); break; + case OP_set_loc2: emit_set_var(C, REG_VBUF, 2); break; + case OP_set_loc3: emit_set_var(C, REG_VBUF, 3); break; + + /* ---- Multi-load shortcut ---- */ + + case OP_get_loc0_loc1: + EMIT_GET_VAR(C, REG_VBUF, 0); + EMIT_GET_VAR(C, REG_VBUF, 1); + break; + + /* ---- Argument access 
(3-byte: get_u16 index) ---- */ + + case OP_get_arg: + EMIT_GET_VAR(C, REG_ABUF, (sljit_sw)get_u16(bc + pos + 1)); + break; + case OP_put_arg: + EMIT_PUT_VAR(C, REG_ABUF, (sljit_sw)get_u16(bc + pos + 1)); + break; + case OP_set_arg: + emit_set_var(C, REG_ABUF, (sljit_sw)get_u16(bc + pos + 1)); + break; + + /* ---- Argument access (1-byte short forms) ---- */ + + case OP_get_arg0: EMIT_GET_VAR(C, REG_ABUF, 0); break; + case OP_get_arg1: EMIT_GET_VAR(C, REG_ABUF, 1); break; + case OP_get_arg2: EMIT_GET_VAR(C, REG_ABUF, 2); break; + case OP_get_arg3: EMIT_GET_VAR(C, REG_ABUF, 3); break; + + case OP_put_arg0: EMIT_PUT_VAR(C, REG_ABUF, 0); break; + case OP_put_arg1: EMIT_PUT_VAR(C, REG_ABUF, 1); break; + case OP_put_arg2: EMIT_PUT_VAR(C, REG_ABUF, 2); break; + case OP_put_arg3: EMIT_PUT_VAR(C, REG_ABUF, 3); break; + + case OP_set_arg0: emit_set_var(C, REG_ABUF, 0); break; + case OP_set_arg1: emit_set_var(C, REG_ABUF, 1); break; + case OP_set_arg2: emit_set_var(C, REG_ABUF, 2); break; + case OP_set_arg3: emit_set_var(C, REG_ABUF, 3); break; + + /* ---- Arithmetic (via icall to quickjs.c helpers) ---- */ + + case OP_add: + EMIT_ADD(C, exc_jumps, &n_exc_jumps); + break; + case OP_sub: + EMIT_SUB(C, exc_jumps, &n_exc_jumps); + break; + case OP_mul: + EMIT_MUL(C, exc_jumps, &n_exc_jumps); + break; + + /* ---- Branches (5-byte: label32) ---- */ + + case OP_if_false: + case OP_if_true: { + int target = pos + 1 + (int32_t)get_u32(bc + pos + 1); + struct sljit_jump *j = EMIT_BRANCH(C, + op == OP_if_true ? 
1 : 0); + if (labels[target]) { + sljit_set_label(j, labels[target]); + } else { + deferred[n_deferred].jump = j; + deferred[n_deferred].target_pc = target; + n_deferred++; + } + break; + } + + case OP_goto: { + int target = pos + 1 + (int32_t)get_u32(bc + pos + 1); + struct sljit_jump *j = sljit_emit_jump(C, SLJIT_JUMP); + if (labels[target]) { + sljit_set_label(j, labels[target]); + } else { + deferred[n_deferred].jump = j; + deferred[n_deferred].target_pc = target; + n_deferred++; + } + break; + } + + /* ---- Branches (2-byte: label8) ---- */ + + case OP_if_false8: + case OP_if_true8: { + int target = pos + 1 + (int8_t)bc[pos + 1]; + struct sljit_jump *j = EMIT_BRANCH(C, + op == OP_if_true8 ? 1 : 0); + if (labels[target]) { + sljit_set_label(j, labels[target]); + } else { + deferred[n_deferred].jump = j; + deferred[n_deferred].target_pc = target; + n_deferred++; + } + break; + } + + case OP_goto8: { + int target = pos + 1 + (int8_t)bc[pos + 1]; + struct sljit_jump *j = sljit_emit_jump(C, SLJIT_JUMP); + if (labels[target]) { + sljit_set_label(j, labels[target]); + } else { + deferred[n_deferred].jump = j; + deferred[n_deferred].target_pc = target; + n_deferred++; + } + break; + } + + /* ---- Branches (3-byte: label16) ---- */ + + case OP_goto16: { + int target = pos + 1 + (int16_t)get_u16(bc + pos + 1); + struct sljit_jump *j = sljit_emit_jump(C, SLJIT_JUMP); + if (labels[target]) { + sljit_set_label(j, labels[target]); + } else { + deferred[n_deferred].jump = j; + deferred[n_deferred].target_pc = target; + n_deferred++; + } + break; + } + + /* ---- Return ---- */ + + case OP_return: + EMIT_RETURN(C); + break; + + case OP_return_undef: + EMIT_RETURN_UNDEF(C); + break; + + /* ---- No-op ---- */ + + case OP_nop: + break; + + /* ---- Exception handling: OP_gosub (custom emitter) ---- */ + + case OP_gosub: { + int return_pos = pos + 5; + int target = pos + 1 + (int32_t)get_u32(bc + pos + 1); + /* Push js_int32(return_pos) onto JS stack */ + emit_push_const_jsv(C, 
JS_MKVAL(JS_TAG_INT, return_pos)); + /* Jump to target (finally block) */ + { + struct sljit_jump *j = sljit_emit_jump(C, SLJIT_JUMP); + if (labels[target]) { + sljit_set_label(j, labels[target]); + } else { + deferred[n_deferred].jump = j; + deferred[n_deferred].target_pc = target; + n_deferred++; + } + } + break; + } + + /* ---- Exception handling: OP_ret (custom emitter) ---- */ + + case OP_ret: { + /* Save sp to aux */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + /* Call qjs_jit_ret(ctx, aux) → returns native address */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(P, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(qjs_jit_ret)); + /* Reload sp from aux */ + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + /* Check: NULL = error → exception */ + { + struct sljit_jump *j = sljit_emit_cmp(C, SLJIT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + exc_jumps[n_exc_jumps++] = j; + } + /* Indirect jump to native return address */ + sljit_emit_ijump(C, SLJIT_JUMP, SLJIT_RETURN_REG, 0); + break; + } + + /* ---- with_* opcodes (custom emitter with 3-way return) ---- */ + + case OP_with_get_var: + case OP_with_put_var: + case OP_with_delete_var: + case OP_with_make_ref: + case OP_with_get_ref: + case OP_with_get_ref_undef: { + int32_t diff = (int32_t)get_u32(bc + pos + 5); + int target = pos + 5 + diff; + + /* Save sp to aux */ + sljit_emit_op1(C, SLJIT_MOV, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + /* Call jit_op_with(ctx, aux, pc) */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, + (sljit_sw)(bc + pos)); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS3(W, P, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_op_with)); + /* Reload 
sp, var_buf, arg_buf from aux */ + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + sljit_emit_op1(C, SLJIT_MOV, REG_VBUF, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, var_buf)); + sljit_emit_op1(C, SLJIT_MOV, REG_ABUF, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, arg_buf)); + /* Sign-extend 32-bit int return to word size (required on Win64 + where mov eax,-1 zero-extends to 0x00000000FFFFFFFF) */ + sljit_emit_op1(C, SLJIT_MOV_S32, SLJIT_RETURN_REG, 0, + SLJIT_RETURN_REG, 0); + { + struct sljit_jump *j = sljit_emit_cmp(C, SLJIT_SIG_LESS, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + exc_jumps[n_exc_jumps++] = j; + } + { + struct sljit_jump *j = sljit_emit_cmp(C, SLJIT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 1); + if (labels[target]) { + sljit_set_label(j, labels[target]); + } else { + deferred[n_deferred].jump = j; + deferred[n_deferred].target_pc = target; + n_deferred++; + } + } + /* Return 0 = fall-through (not found), continue */ + break; + } + + /* ---- Push constants (not natively handled) ---- */ + case OP_push_const: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_push_const, bc + pos); break; + case OP_fclosure: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_fclosure, bc + pos); break; + case OP_push_atom_value: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_push_atom_value, bc + pos); break; + case OP_private_symbol: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_private_symbol, bc + pos); break; + case OP_push_this: EMIT_PUSH_THIS(C, exc_jumps, &n_exc_jumps); break; + case OP_object: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_object); break; + case OP_special_object: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_special_object, bc + pos); break; + case OP_rest: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_rest, bc + pos); break; + case OP_push_const8: exc_jumps[n_exc_jumps++] = 
emit_op_call_pc(C, (void *)jit_op_push_const8, bc + pos); break; + case OP_fclosure8: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_fclosure8, bc + pos); break; + case OP_push_empty_string: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_push_literal, OP_push_empty_string); break; + case OP_push_bigint_i32: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_push_bigint_i32, bc + pos); break; + /* ---- Stack manipulation (not natively handled) ---- */ + case OP_nip1: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_nip1); break; + case OP_dup1: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_dup1); break; + case OP_dup2: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_dup2); break; + case OP_dup3: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_dup3); break; + case OP_insert2: EMIT_INSERT2(C); break; + case OP_insert3: EMIT_INSERT3(C); break; + case OP_insert4: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_insert4); break; + case OP_perm3: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_perm3); break; + case OP_perm4: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_perm4); break; + case OP_perm5: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_perm5); break; + case OP_swap2: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_swap2); break; + case OP_rot3l: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_rot3l); break; + case OP_rot3r: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_rot3r); break; + case OP_rot4l: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_rot4l); break; + case OP_rot5l: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_rot5l); break; + /* ---- Calls ---- */ + case OP_call: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_call, bc + pos); break; + case OP_call_constructor: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void 
*)jit_op_call_constructor, bc + pos); break; + case OP_call_method: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_call_method, bc + pos); break; + case OP_tail_call: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_call, bc + pos); EMIT_RETURN(C); break; + case OP_tail_call_method: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_call_method, bc + pos); EMIT_RETURN(C); break; + case OP_array_from: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_array_from, bc + pos); break; + case OP_apply: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_apply, bc + pos); break; + case OP_call0: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_call_n, 0); break; + case OP_call1: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_call_n, 1); break; + case OP_call2: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_call_n, 2); break; + case OP_call3: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_call_n, 3); break; + /* ---- Constructor/brand ---- */ + case OP_check_ctor_return: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_check_ctor_return); break; + case OP_check_ctor: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_check_ctor); break; + case OP_init_ctor: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_init_ctor); break; + case OP_check_brand: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_check_brand); break; + case OP_add_brand: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_add_brand); break; + /* ---- Throw/eval/misc ---- */ + case OP_throw: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_throw); break; + case OP_throw_error: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_throw_error, bc + pos); break; + case OP_eval: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_eval, bc + pos); break; + case OP_apply_eval: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void 
*)jit_op_apply_eval, bc + pos); break; + case OP_regexp: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_regexp); break; + case OP_get_super: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_get_super); break; + case OP_import: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_import); break; + /* ---- Global variables ---- */ + case OP_get_var: case OP_get_var_undef: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_get_var, bc + pos); break; + case OP_put_var: case OP_put_var_init: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_put_var, bc + pos); break; + case OP_check_define_var: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_check_define_var, bc + pos); break; + case OP_define_var: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_define_var, bc + pos); break; + case OP_define_func: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_define_func, bc + pos); break; + /* ---- Ref value ---- */ + case OP_get_ref_value: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_get_ref_value); break; + case OP_put_ref_value: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_put_ref_value); break; + /* ---- Property access ---- */ + case OP_get_field: { PropIC *ic = ic_array ? &ic_array[ic_idx++] : NULL; if (ic) emit_get_field_ic_fast(C, bc + pos, ic, &ic_layout, exc_jumps, &n_exc_jumps); else exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_get_field, bc + pos); break; } + case OP_get_field2: { PropIC *ic = ic_array ? &ic_array[ic_idx++] : NULL; if (ic) emit_get_field2_ic_fast(C, bc + pos, ic, &ic_layout, exc_jumps, &n_exc_jumps); else exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_get_field2, bc + pos); break; } + case OP_put_field: { PropIC *ic = ic_array ? 
&ic_array[ic_idx++] : NULL; if (ic) emit_put_field_ic_fast(C, bc + pos, ic, &ic_layout, exc_jumps, &n_exc_jumps); else exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_put_field, bc + pos); break; } + case OP_get_private_field: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_get_private_field); break; + case OP_put_private_field: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_put_private_field); break; + case OP_define_private_field: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_define_private_field); break; + case OP_get_array_el: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_get_array_el); break; + case OP_get_array_el2: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_get_array_el2); break; + case OP_put_array_el: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_put_array_el); break; + case OP_get_super_value: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_get_super_value); break; + case OP_put_super_value: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_put_super_value); break; + case OP_get_length: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_get_length); break; + /* ---- Define ---- */ + case OP_define_field: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_define_field, bc + pos); break; + case OP_set_name: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_set_name, bc + pos); break; + case OP_set_name_computed: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_set_name_computed); break; + case OP_set_proto: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_set_proto); break; + case OP_set_home_object: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_set_home_object); break; + case OP_define_array_el: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_define_array_el); break; + case OP_append: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_append); break; + 
case OP_copy_data_properties: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_copy_data_properties, bc + pos); break; + case OP_define_method: case OP_define_method_computed: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_define_method, bc + pos); break; + case OP_define_class: case OP_define_class_computed: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_define_class, bc + pos); break; + /* ---- Var ref (3-byte) ---- */ + case OP_get_var_ref: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_get_var_ref, (sljit_sw)get_u16(bc + pos + 1)); break; + case OP_put_var_ref: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_put_var_ref, (sljit_sw)get_u16(bc + pos + 1)); break; + case OP_set_var_ref: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_set_var_ref, (sljit_sw)get_u16(bc + pos + 1)); break; + case OP_get_var_ref_check: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_get_var_ref_check, bc + pos); break; + case OP_put_var_ref_check: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_put_var_ref_check, bc + pos); break; + case OP_put_var_ref_check_init: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_put_var_ref_check_init, bc + pos); break; + /* ---- Var ref short forms ---- */ + case OP_get_var_ref0: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_get_var_ref, 0); break; + case OP_get_var_ref1: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_get_var_ref, 1); break; + case OP_get_var_ref2: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_get_var_ref, 2); break; + case OP_get_var_ref3: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_get_var_ref, 3); break; + case OP_put_var_ref0: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_put_var_ref, 0); break; + case OP_put_var_ref1: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_put_var_ref, 1); break; + case OP_put_var_ref2: 
exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_put_var_ref, 2); break; + case OP_put_var_ref3: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_put_var_ref, 3); break; + case OP_set_var_ref0: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_set_var_ref, 0); break; + case OP_set_var_ref1: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_set_var_ref, 1); break; + case OP_set_var_ref2: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_set_var_ref, 2); break; + case OP_set_var_ref3: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_set_var_ref, 3); break; + /* ---- Checked loc ---- */ + case OP_set_loc_uninitialized: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_set_loc_uninitialized, bc + pos); break; + case OP_get_loc_check: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_get_loc_check, bc + pos); break; + case OP_put_loc_check: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_put_loc_check, bc + pos); break; + case OP_put_loc_check_init: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_put_loc_check_init, bc + pos); break; + case OP_close_loc: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_close_loc, bc + pos); break; + /* ---- Exception ---- */ + case OP_catch: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_catch, bc + pos); break; + case OP_nip_catch: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_nip_catch); break; + /* ---- Conversion ---- */ + case OP_to_object: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_to_object); break; + case OP_to_propkey: EMIT_TO_PROPKEY(C, exc_jumps, &n_exc_jumps); break; + case OP_to_propkey2: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_to_propkey2); break; + /* ---- Make refs ---- */ + case OP_make_loc_ref: case OP_make_arg_ref: case OP_make_var_ref_ref: + exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_make_ref, bc + 
pos); break; + case OP_make_var_ref: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_make_var_ref, bc + pos); break; + /* ---- Iterators ---- */ + case OP_for_in_start: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_for_in_start); break; + case OP_for_of_start: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_for_of_start); break; + case OP_for_in_next: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_for_in_next); break; + case OP_for_of_next: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_for_of_next, bc + pos); break; + case OP_iterator_check_object: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_iterator_check_object); break; + case OP_iterator_get_value_done: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_iterator_get_value_done); break; + case OP_iterator_close: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_iterator_close); break; + case OP_iterator_next: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_iterator_next); break; + case OP_iterator_call: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_iterator_call, bc + pos); break; + /* ---- Unary arithmetic ---- */ + case OP_neg: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_neg); break; + case OP_plus: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_plus); break; + case OP_dec: EMIT_INC_DEC(C, 1, exc_jumps, &n_exc_jumps); break; + case OP_inc: EMIT_INC_DEC(C, 0, exc_jumps, &n_exc_jumps); break; + case OP_post_dec: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_post_dec); break; + case OP_post_inc: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_post_inc); break; + case OP_dec_loc: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_dec_loc, bc + pos); break; + case OP_inc_loc: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_inc_loc, bc + pos); break; + case OP_add_loc: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, 
(void *)jit_op_add_loc, bc + pos); break; + case OP_not: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_not); break; + case OP_lnot: EMIT_LNOT(C, exc_jumps, &n_exc_jumps); break; + case OP_typeof: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_typeof); break; + case OP_delete: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_delete); break; + case OP_delete_var: exc_jumps[n_exc_jumps++] = emit_op_call_pc(C, (void *)jit_op_delete_var, bc + pos); break; + /* ---- Binary arithmetic (add/sub/mul already natively handled) ---- */ + case OP_div: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_div); break; + case OP_mod: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_mod); break; + case OP_pow: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_pow); break; + case OP_shl: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_shl); break; + case OP_sar: exc_jumps[n_exc_jumps++] = emit_op_call_int(C, (void *)jit_op_binary_logic, OP_sar); break; + case OP_shr: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_shr); break; + case OP_and: EMIT_BINARY_LOGIC(C, OP_and, exc_jumps, &n_exc_jumps); break; + case OP_xor: EMIT_BINARY_LOGIC(C, OP_xor, exc_jumps, &n_exc_jumps); break; + case OP_or: EMIT_BINARY_LOGIC(C, OP_or, exc_jumps, &n_exc_jumps); break; + /* ---- Comparison ---- */ + case OP_lt: EMIT_RELATIONAL(C, OP_lt, exc_jumps, &n_exc_jumps); break; + case OP_lte: EMIT_RELATIONAL(C, OP_lte, exc_jumps, &n_exc_jumps); break; + case OP_gt: EMIT_RELATIONAL(C, OP_gt, exc_jumps, &n_exc_jumps); break; + case OP_gte: EMIT_RELATIONAL(C, OP_gte, exc_jumps, &n_exc_jumps); break; + case OP_instanceof: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_instanceof); break; + case OP_in: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_in); break; + case OP_eq: EMIT_EQ(C, OP_eq, exc_jumps, &n_exc_jumps); break; + case OP_neq: EMIT_EQ(C, OP_neq, exc_jumps, &n_exc_jumps); break; + case 
OP_strict_eq: EMIT_EQ(C, OP_strict_eq, exc_jumps, &n_exc_jumps); break; + case OP_strict_neq: EMIT_EQ(C, OP_strict_neq, exc_jumps, &n_exc_jumps); break; + case OP_is_undefined_or_null: EMIT_IS_UNDEF_OR_NULL(C, exc_jumps, &n_exc_jumps); break; + case OP_private_in: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_private_in); break; + /* ---- Type checks ---- */ + case OP_is_undefined: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_is_undefined); break; + case OP_is_null: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_is_null); break; + case OP_typeof_is_undefined: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_typeof_is_undefined); break; + case OP_typeof_is_function: exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_typeof_is_function); break; + /* ---- Generator/async opcodes ---- */ + case OP_await: + emit_generator_suspend(C, 0, bc + pos + opsize); + break; + case OP_yield: + emit_generator_suspend(C, 1, bc + pos + opsize); + break; + case OP_yield_star: + case OP_async_yield_star: + emit_generator_suspend(C, 2, bc + pos + opsize); + break; + case OP_initial_yield: + case OP_return_async: + emit_generator_suspend(C, 3, bc + pos + opsize); + break; + case OP_for_await_of_start: + exc_jumps[n_exc_jumps++] = emit_op_call_2(C, (void *)jit_op_for_await_of_start); + break; + case OP_invalid: + default: + abort(); break; + } + + pos += opsize; + } + + /* Patch deferred forward jumps */ + { + int i; + for (i = 0; i < n_deferred; i++) { + struct sljit_label *lbl = labels[deferred[i].target_pc]; + if (!lbl) + goto fail_compiler; /* should not happen */ + sljit_set_label(deferred[i].jump, lbl); + } + } + + /* Emit exception handler: unwind stack, dispatch to catch or propagate */ + { + struct sljit_label *exc_label = sljit_emit_label(C); + int i; + for (i = 0; i < n_exc_jumps; i++) { + sljit_set_label(exc_jumps[i], exc_label); + } + /* Store current sp to aux */ + sljit_emit_op1(C, SLJIT_MOV, + 
SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp), + REG_SP, 0); + /* Call jit_unwind_exception(ctx, aux) → native addr or NULL */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R0, 0, REG_CTX, 0); + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R1, 0, REG_AUX, 0); + sljit_emit_icall(C, SLJIT_CALL, SLJIT_ARGS2(P, P, P), + SLJIT_IMM, SLJIT_FUNC_ADDR(jit_unwind_exception)); + /* Check: NULL = no handler found, propagate to caller */ + { + struct sljit_jump *no_handler = sljit_emit_cmp(C, SLJIT_EQUAL, + SLJIT_RETURN_REG, 0, + SLJIT_IMM, 0); + /* Handler found — save native addr, reload registers, jump */ + sljit_emit_op1(C, SLJIT_MOV, SLJIT_R4, 0, + SLJIT_RETURN_REG, 0); + sljit_emit_op1(C, SLJIT_MOV, REG_SP, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, sp)); + sljit_emit_op1(C, SLJIT_MOV, REG_VBUF, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, var_buf)); + sljit_emit_op1(C, SLJIT_MOV, REG_ABUF, 0, + SLJIT_MEM1(REG_AUX), (sljit_sw)offsetof(JitAux, arg_buf)); + /* Indirect jump to catch handler native code */ + sljit_emit_ijump(C, SLJIT_JUMP, SLJIT_R4, 0); + + /* No handler — propagate exception to caller */ + sljit_set_label(no_handler, sljit_emit_label(C)); + sljit_emit_return(C, SLJIT_MOV, SLJIT_IMM, 1); + } + } + + /* Generate executable code */ + code = sljit_generate_code(C, 0, NULL); + if (!code) + goto fail_compiler; + + *out_jitcode = (JitFunc)code; + *out_jit_code_ptr = code; + + /* Fill in dispatch table native addresses from labels */ + { + int i; + for (i = 0; i < n_dispatch; i++) { + int bc_pos_i = dispatch_table[i].bc_pos; + if (bc_pos_i < 0 || bc_pos_i >= bc_len) { + dispatch_table[i].native_addr = NULL; + continue; + } + struct sljit_label *lbl = labels[bc_pos_i]; + if (lbl) { + dispatch_table[i].native_addr = + (void *)sljit_get_label_addr(lbl); + } else { + dispatch_table[i].native_addr = NULL; + } + } + *out_dispatch_table = dispatch_table; + *out_dispatch_count = n_dispatch; + dispatch_table = NULL; /* prevent free in cleanup */ + } + + *out_ic_cache = 
ic_array; + *out_ic_count = ic_total; + ic_array = NULL; /* prevent free in cleanup */ + + sljit_free_compiler(C); + free(is_target); + free(labels); + free(deferred); + free(exc_jumps); + return; + +fail_compiler: + sljit_free_compiler(C); +fail: + free(is_target); + free(labels); + free(deferred); + free(exc_jumps); + free(dispatch_table); + free(ic_array); + *out_jitcode = NULL; + *out_jit_code_ptr = NULL; + *out_dispatch_table = NULL; + *out_dispatch_count = 0; + *out_ic_cache = NULL; + *out_ic_count = 0; +} + +void js_sljit_free(void *jit_code_ptr) +{ + if (jit_code_ptr) + sljit_free_code(jit_code_ptr, NULL); +} diff --git a/sljit b/sljit new file mode 160000 index 000000000..35ffee1a8 --- /dev/null +++ b/sljit @@ -0,0 +1 @@ +Subproject commit 35ffee1a860baf9224d4643bdfbf1cb2ad930555