diff --git a/src/instance.zig b/src/instance.zig index 77a02c76..c252ebea 100644 --- a/src/instance.zig +++ b/src/instance.zig @@ -13,6 +13,7 @@ const Global = @import("store/global.zig").Global; const Elem = @import("store/elem.zig").Elem; const Data = @import("store/data.zig").Data; const VirtualMachine = @import("instance/vm.zig").VirtualMachine; +const Instruction = VirtualMachine.Instruction; const VirtualMachineOptions = struct { frame_stack_size: comptime_int = 1024, @@ -348,7 +349,7 @@ pub const Instance = struct { try vm.pushLabel(VirtualMachine.Label{ .return_arity = function.results.len, .op_stack_len = locals_start, - .branch_target = 0, + .branch_target = @as([*]Instruction, @ptrCast(&self.module.instructions.items[0])), }); // 8. Execute our function @@ -397,7 +398,7 @@ pub const Instance = struct { try vm.pushLabel(VirtualMachine.Label{ .return_arity = 0, .op_stack_len = locals_start, - .branch_target = 0, + .branch_target = @as([*]Instruction, @ptrCast(&self.module.instructions.items[0])), }); try vm.invoke(f.start); @@ -409,7 +410,7 @@ pub const Instance = struct { } } - pub fn invokeExpression(self: *Instance, start: usize, comptime Result: type, comptime options: VirtualMachineOptions) !Result { + pub fn invokeExpression(self: *Instance, start: [*]Instruction, comptime Result: type, comptime options: VirtualMachineOptions) !Result { var frame_stack: [options.frame_stack_size]VirtualMachine.Frame = [_]VirtualMachine.Frame{undefined} ** options.frame_stack_size; var label_stack: [options.label_stack_size]VirtualMachine.Label = [_]VirtualMachine.Label{undefined} ** options.label_stack_size; var op_stack: [options.operand_stack_size]u64 = [_]u64{0} ** options.operand_stack_size; @@ -428,6 +429,7 @@ pub const Instance = struct { try vm.pushLabel(VirtualMachine.Label{ .return_arity = 1, .op_stack_len = locals_start, + .branch_target = @as([*]Instruction, @ptrCast(&self.module.instructions.items[0])), }); try vm.invoke(start); diff --git 
a/src/instance/vm.zig b/src/instance/vm.zig index e351550e..69749e33 100644 --- a/src/instance/vm.zig +++ b/src/instance/vm.zig @@ -9,6 +9,8 @@ const ValType = @import("../module.zig").ValType; const Instance = @import("../instance.zig").Instance; const WasiPreopen = @import("../instance.zig").WasiPreopen; const Rr = @import("../rr.zig").Rr; +const rr = @import("../rr.zig"); +const immediate = rr.immediate; // VirtualMachine: // @@ -46,7 +48,7 @@ pub const VirtualMachine = struct { pub const Frame = struct { locals: []u64 = undefined, // TODO: we're in trouble if we move our stacks in memory - return_arity: usize = 0, + return_arity: usize, op_stack_len: usize, label_stack_len: usize, inst: *Instance, @@ -56,8 +58,8 @@ pub const VirtualMachine = struct { // // - code: the code we should interpret after `end` pub const Label = struct { - return_arity: usize = 0, - branch_target: usize = 0, + branch_target: [*]Instruction, + return_arity: usize, op_stack_len: usize, // u32? }; @@ -73,25 +75,15 @@ pub const VirtualMachine = struct { }; } - pub fn lookupWasiPreopen(self: *VirtualMachine, wasi_fd: os.wasi.fd_t) ?WasiPreopen { - return self.wasi_preopens.get(wasi_fd); + pub fn invoke(self: *VirtualMachine, ip: [*]Instruction) !void { + try @call(.auto, @as(InstructionFunction, @ptrCast(ip[0])), .{ self, ip }); } - pub fn getHostFd(self: *VirtualMachine, wasi_fd: wasi.fd_t) os.fd_t { - const preopen = self.lookupWasiPreopen(wasi_fd) orelse return wasi_fd; + // To avoid a recursive definition, define similar function pointer type we will cast to / from + pub const Instruction = *const fn (usize, usize) WasmError!void; + pub const InstructionFunction = *const fn (*VirtualMachine, [*]Instruction) WasmError!void; - return preopen.host_fd; - } - - pub fn invoke(self: *VirtualMachine, ip: usize) !void { - const instr = self.inst.module.parsed_code.items[ip]; - - try @call(.auto, lookup[@intFromEnum(instr)], .{ self, ip, self.inst.module.parsed_code.items }); - } - - const 
InstructionFunction = *const fn (*VirtualMachine, usize, []Rr) WasmError!void; - - const lookup = [256]InstructionFunction{ + pub const lookup = [256]InstructionFunction{ @"unreachable", nop, block, loop, @"if", @"else", if_no_else, impl_ni, impl_ni, impl_ni, impl_ni, end, br, br_if, br_table, @"return", call, call_indirect, fast_call, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, drop, select, select, impl_ni, impl_ni, impl_ni, @"local.get", @"local.set", @"local.tee", @"global.get", @"global.set", @"table.get", @"table.set", impl_ni, @"i32.load", @"i64.load", @"f32.load", @"f64.load", @"i32.load8_s", @"i32.load8_u", @"i32.load16_s", @"i32.load16_u", @@ -110,28 +102,26 @@ pub const VirtualMachine = struct { impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, misc, impl_ni, impl_ni, impl_ni, }; - inline fn dispatch(self: *VirtualMachine, next_ip: usize, code: []Rr) WasmError!void { - const next_instr = code[next_ip]; - - return try @call(.always_tail, lookup[@intFromEnum(next_instr)], .{ self, next_ip, code }); + inline fn dispatch(self: *VirtualMachine, next_ip: [*]Instruction) WasmError!void { + return try @call(.always_tail, @as(InstructionFunction, @ptrCast(next_ip[0])), .{ self, next_ip }); } pub const REF_NULL: u64 = 0xFFFF_FFFF_FFFF_FFFF; - fn impl_ni(_: *VirtualMachine, _: usize, _: []Rr) WasmError!void { + pub fn impl_ni(_: *VirtualMachine, _: [*]Instruction) WasmError!void { return error.NotImplemented; } - fn @"unreachable"(_: *VirtualMachine, _: usize, _: []Rr) WasmError!void { + pub fn @"unreachable"(_: *VirtualMachine, _: [*]Instruction) WasmError!void { return error.TrapUnreachable; } - fn nop(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - return dispatch(self, ip + 1, code); + pub fn nop(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + return dispatch(self, ip + 1); } - fn block(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - 
const meta = code[ip].block; + pub fn block(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.block); try self.pushLabel(Label{ .return_arity = meta.return_arity, @@ -139,11 +129,11 @@ pub const VirtualMachine = struct { .branch_target = meta.branch_target, }); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn loop(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].loop; + pub fn loop(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.loop); try self.pushLabel(Label{ // note that we use block_params rather than block_returns for return arity: @@ -152,11 +142,11 @@ pub const VirtualMachine = struct { .branch_target = meta.branch_target, }); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"if"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"if"; + pub fn @"if"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"if"); const condition = self.popOperand(u32); try self.pushLabel(Label{ @@ -165,21 +155,21 @@ pub const VirtualMachine = struct { .branch_target = meta.branch_target, }); - return dispatch(self, if (condition == 0) meta.else_ip else ip + 1, code); + return dispatch(self, if (condition == 0) meta.else_ip else ip + 1); } - fn @"else"(self: *VirtualMachine, _: usize, code: []Rr) WasmError!void { + pub fn @"else"(self: *VirtualMachine, _: Instruction) WasmError!void { const label = self.popLabel(); - return dispatch(self, label.branch_target, code); + return dispatch(self, label.branch_target); } - fn if_no_else(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].if_no_else; + pub fn if_no_else(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.if_no_else); const condition = self.popOperand(u32); if (condition == 0) { - return 
dispatch(self, meta.branch_target, code); + return dispatch(self, meta.branch_target); } else { // We are inside the if branch try self.pushLabel(Label{ @@ -188,42 +178,42 @@ pub const VirtualMachine = struct { .branch_target = meta.branch_target, }); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } } - fn end(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn end(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { _ = self.popLabel(); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn br(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const next_ip = self.branch(code[ip].br); + pub fn br(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const next_ip = self.branch(immediate(ip, rr.br)); - return dispatch(self, next_ip, code); + return dispatch(self, next_ip); } - fn br_if(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn br_if(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const condition = self.popOperand(u32); - const next_ip = if (condition == 0) ip + 1 else self.branch(code[ip].br_if); + const next_ip = if (condition == 0) ip + 1 else self.branch(immediate(ip, rr.br_if)); - return dispatch(self, next_ip, code); + return dispatch(self, next_ip); } - fn br_table(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].br_table; + pub fn br_table(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.br_table); const i = self.popOperand(u32); const ls = self.inst.module.br_table_indices.items[meta.ls.offset .. 
meta.ls.offset + meta.ls.count]; const next_ip = if (i >= ls.len) self.branch(meta.ln) else self.branch(ls[i]); - return dispatch(self, next_ip, code); + return dispatch(self, next_ip); } - fn @"return"(self: *VirtualMachine, _: usize, _: []Rr) WasmError!void { + pub fn @"return"(self: *VirtualMachine, _: Instruction) WasmError!void { const frame = self.peekFrame(); const n = frame.return_arity; @@ -246,11 +236,11 @@ pub const VirtualMachine = struct { const previous_frame = self.peekFrame(); self.inst = previous_frame.inst; - return dispatch(self, label.branch_target, previous_frame.inst.module.parsed_code.items); + return dispatch(self, label.branch_target); } - fn call(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const funcidx = code[ip].call; + pub fn call(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const funcidx = immediate(ip, rr.call); const function = try self.inst.getFunc(funcidx); var next_ip = ip; @@ -288,11 +278,11 @@ pub const VirtualMachine = struct { }, } - return dispatch(self, next_ip, self.inst.module.parsed_code.items); + return dispatch(self, next_ip); } - fn call_indirect(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const call_indirect_instruction = code[ip].call_indirect; + pub fn call_indirect(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const call_indirect_instruction = immediate(ip, rr.call_indirect); var module = self.inst.module; const typeidx = call_indirect_instruction.typeidx; @@ -344,11 +334,11 @@ pub const VirtualMachine = struct { }, } - return dispatch(self, next_ip, self.inst.module.parsed_code.items); + return dispatch(self, next_ip); } - fn fast_call(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const f = code[ip].fast_call; + pub fn fast_call(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const f = immediate(ip, rr.fast_call); // Check we have enough stack space try self.checkStackSpace(f.required_stack_space + 
f.locals); @@ -371,15 +361,15 @@ pub const VirtualMachine = struct { .branch_target = ip + 1, }); - return dispatch(self, f.start, code); + return dispatch(self, f.start); } - fn drop(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn drop(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { _ = self.popAnyOperand(); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn select(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn select(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const condition = self.popOperand(u32); const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); @@ -390,60 +380,60 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, c2); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"local.get"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const localidx = code[ip].@"local.get"; + pub fn @"local.get"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const localidx = immediate(ip, rr.@"local.get"); const frame = self.peekFrame(); self.pushOperandNoCheck(u64, frame.locals[localidx]); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"local.set"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const localidx = code[ip].@"local.set"; + pub fn @"local.set"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const localidx = immediate(ip, rr.@"local.set"); const frame = self.peekFrame(); frame.locals[localidx] = self.popOperand(u64); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"local.tee"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const localidx = code[ip].@"local.tee"; + pub fn @"local.tee"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const localidx = immediate(ip, rr.@"local.tee"); const frame = self.peekFrame(); frame.locals[localidx] = 
self.peekOperand(); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"global.get"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const globalidx = code[ip].@"global.get"; + pub fn @"global.get"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const globalidx = immediate(ip, rr.@"global.get"); const global = try self.inst.getGlobal(globalidx); self.pushOperandNoCheck(u64, global.value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"global.set"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const globalidx = code[ip].@"global.set"; + pub fn @"global.set"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const globalidx = immediate(ip, rr.@"global.set"); const value = self.popAnyOperand(); const global = try self.inst.getGlobal(globalidx); global.value = value; - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"table.get"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const tableidx = code[ip].@"table.get"; + pub fn @"table.get"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const tableidx = immediate(ip, rr.@"table.get"); const table = try self.inst.getTable(tableidx); const index = self.popOperand(u32); @@ -455,11 +445,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, REF_NULL); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"table.set"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const tableidx = code[ip].@"table.set"; + pub fn @"table.set"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const tableidx = immediate(ip, rr.@"table.set"); const table = try self.inst.getTable(tableidx); const ref = self.popOperand(u64); @@ -467,11 +457,11 @@ pub const VirtualMachine = struct { try table.set(index, ref); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn 
@"i32.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load"; + pub fn @"i32.load"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.load"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -479,11 +469,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load"; + pub fn @"i64.load"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.load"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -491,11 +481,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"f32.load"; + pub fn @"f32.load"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"f32.load"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -503,11 +493,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"f64.load"; + pub fn @"f64.load"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"f64.load"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -515,11 +505,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.load8_s"(self: 
*VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load8_s"; + pub fn @"i32.load8_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.load8_s"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -527,11 +517,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.load8_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load8_u"; + pub fn @"i32.load8_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.load8_u"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -539,11 +529,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.load16_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load16_s"; + pub fn @"i32.load16_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.load16_s"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -551,11 +541,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.load16_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load16_u"; + pub fn @"i32.load16_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.load16_u"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -563,11 +553,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 
1); } - fn @"i64.load8_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load8_s"; + pub fn @"i64.load8_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.load8_s"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -575,11 +565,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.load8_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load8_u"; + pub fn @"i64.load8_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.load8_u"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -587,11 +577,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.load16_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load16_s"; + pub fn @"i64.load16_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.load16_s"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -599,11 +589,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.load16_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load16_u"; + pub fn @"i64.load16_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.load16_u"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -611,11 +601,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, 
code); + return dispatch(self, ip + 1); } - fn @"i64.load32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load32_s"; + pub fn @"i64.load32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.load32_s"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -623,11 +613,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.load32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load32_u"; + pub fn @"i64.load32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.load32_u"); const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); @@ -635,11 +625,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.store"; + pub fn @"i32.store"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.store"); const memory = try self.inst.getMemory(0); const value = self.popOperand(u32); @@ -647,11 +637,11 @@ pub const VirtualMachine = struct { try memory.write(u32, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store"; + pub fn @"i64.store"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.store"); const memory = try self.inst.getMemory(0); const value = self.popOperand(u64); @@ -659,11 +649,11 @@ pub const VirtualMachine = struct { try memory.write(u64, meta.offset, 
address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"f32.store"; + pub fn @"f32.store"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"f32.store"); const memory = try self.inst.getMemory(0); const value = self.popOperand(f32); @@ -671,11 +661,11 @@ pub const VirtualMachine = struct { try memory.write(f32, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"f64.store"; + pub fn @"f64.store"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"f64.store"); const memory = try self.inst.getMemory(0); const value = self.popOperand(f64); @@ -683,11 +673,11 @@ pub const VirtualMachine = struct { try memory.write(f64, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.store8"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.store8"; + pub fn @"i32.store8"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.store8"); const memory = try self.inst.getMemory(0); const value: u8 = @truncate(self.popOperand(u32)); @@ -695,11 +685,11 @@ pub const VirtualMachine = struct { try memory.write(u8, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.store16"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.store16"; + pub fn @"i32.store16"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i32.store16"); const memory = try self.inst.getMemory(0); const value: u16 = @truncate(self.popOperand(u32)); @@ 
-707,11 +697,11 @@ pub const VirtualMachine = struct { try memory.write(u16, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.store8"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store8"; + pub fn @"i64.store8"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.store8"); const memory = try self.inst.getMemory(0); const value: u8 = @truncate(self.popOperand(u64)); @@ -719,11 +709,11 @@ pub const VirtualMachine = struct { try memory.write(u8, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.store16"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store16"; + pub fn @"i64.store16"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.store16"); const memory = try self.inst.getMemory(0); const value: u16 = @truncate(self.popOperand(u64)); @@ -731,11 +721,11 @@ pub const VirtualMachine = struct { try memory.write(u16, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.store32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store32"; + pub fn @"i64.store32"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.@"i64.store32"); const memory = try self.inst.getMemory(0); const value: u32 = @truncate(self.popOperand(u64)); @@ -743,18 +733,18 @@ pub const VirtualMachine = struct { try memory.write(u32, meta.offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"memory.size"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.size"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const memory = try self.inst.getMemory(0); 
self.pushOperandNoCheck(u32, @as(u32, @intCast(memory.size()))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"memory.grow"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.grow"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const memory = try self.inst.getMemory(0); const num_pages = self.popOperand(u32); @@ -764,394 +754,394 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, -1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"i32.const"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const instr = immediate(ip, rr.@"i32.const"); self.pushOperandNoCheck(i32, instr.@"i32.const"); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"i64.const"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const instr = immediate(ip, rr.@"i64.const"); self.pushOperandNoCheck(i64, instr.@"i64.const"); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"f32.const"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const instr = immediate(ip, rr.@"f32.const"); self.pushOperandNoCheck(f32, instr.@"f32.const"); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"f64.const"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const instr = immediate(ip, rr.@"f64.const"); self.pushOperandNoCheck(f64, instr.@"f64.const"); - return dispatch(self, ip + 1, code); + return 
dispatch(self, ip + 1); } - fn @"i32.eqz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.eqz"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 == 0) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.eq"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ne"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.lt_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.lt_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.lt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.lt_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.gt_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.gt_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i32); 
const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.gt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.gt_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.le_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.le_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.le_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.le_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.ge_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ge_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.ge_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ge_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn 
@"i64.eqz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.eqz"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == 0) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.eq"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ne"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.lt_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.lt_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.lt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.lt_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.gt_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.gt_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i64); const c1 = 
self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.gt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.gt_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.le_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.le_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.le_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.le_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.ge_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ge_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.ge_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ge_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn 
@"f32.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.eq"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.ne"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.lt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.lt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.gt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.gt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.le"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.le"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.ge"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.ge"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = 
self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.eq"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.ne"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.lt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.lt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.gt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.gt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.le"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.le"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.ge"(self: 
*VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.ge"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.clz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.clz"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @clz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.ctz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ctz"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @ctz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.popcnt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.popcnt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @popCount(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.add"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 +% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.sub"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 -% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.mul"(self: *VirtualMachine, ip: usize, 
code: []Rr) WasmError!void { + pub fn @"i32.mul"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 *% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.div_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.div_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); @@ -1159,10 +1149,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.div_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.div_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); @@ -1170,10 +1160,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.rem_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rem_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); @@ -1182,10 +1172,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.rem_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rem_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); @@ -1193,46 +1183,46 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.and"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.and"(self: 
*VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 & c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.or"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.or"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 | c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.xor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.xor"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 ^ c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.shl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.shl"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.shl(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.shr_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.shr_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); @@ -1240,85 +1230,85 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, math.shr(i32, c1, mod)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.shr_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.shr_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.shr(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, 
code); + return dispatch(self, ip + 1); } - fn @"i32.rotl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rotl"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.rotl(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.rotr"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rotr"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.rotr(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.clz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.clz"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @clz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.ctz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ctz"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @ctz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.popcnt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.popcnt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @popCount(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.add"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 +% c2); - return dispatch(self, ip + 1, code); + 
return dispatch(self, ip + 1); } - fn @"i64.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.sub"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 -% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.mul"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.mul"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 *% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.div_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.div_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); @@ -1326,10 +1316,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.div_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.div_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); @@ -1337,10 +1327,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.rem_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.rem_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); @@ -1349,10 +1339,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.rem_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn 
@"i64.rem_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); @@ -1360,46 +1350,46 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.and"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.and"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 & c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.or"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.or"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 | c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.xor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.xor"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 ^ c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.shl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.shl"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.shl(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.shr_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.shr_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); @@ -1407,77 +1397,77 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, 
math.shr(i64, c1, mod)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.shr_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.shr_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.shr(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.rotl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.rotl"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.rotl(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.rotr"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.rotr"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.rotr(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.abs"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.abs"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, math.fabs(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.neg"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.neg"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, -c1); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.ceil"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.ceil"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); 
self.pushOperandNoCheck(f32, @ceil(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.floor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.floor"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @floor(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.trunc"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.trunc"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @trunc(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.nearest"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.nearest"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); const floor = @floor(c1); const ceil = @ceil(c1); @@ -1492,64 +1482,64 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, @round(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.sqrt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.sqrt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, math.sqrt(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.add"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 + c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.sub"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = 
self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 - c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.mul"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.mul"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 * c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.div"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.div"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 / c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.min"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.min"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); if (math.isNan(c1)) { self.pushOperandNoCheck(f32, math.nan_f32); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (math.isNan(c2)) { self.pushOperandNoCheck(f32, math.nan_f32); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (c1 == 0.0 and c2 == 0.0) { @@ -1562,20 +1552,20 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, @min(c1, c2)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.max"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.max"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); if (math.isNan(c1)) { self.pushOperandNoCheck(f32, math.nan_f32); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (math.isNan(c2)) { self.pushOperandNoCheck(f32, math.nan_f32); - return 
dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (c1 == 0.0 and c2 == 0.0) { @@ -1588,10 +1578,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, @max(c1, c2)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.copysign"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.copysign"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); @@ -1601,50 +1591,50 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, math.fabs(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.abs"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.abs"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, math.fabs(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.neg"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.neg"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, -c1); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.ceil"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.ceil"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @ceil(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.floor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.floor"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @floor(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.trunc"(self: *VirtualMachine, ip: usize, code: []Rr) 
WasmError!void { + pub fn @"f64.trunc"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @trunc(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.nearest"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.nearest"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); const floor = @floor(c1); const ceil = @ceil(c1); @@ -1659,60 +1649,60 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, @round(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.sqrt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.sqrt"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, math.sqrt(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.add"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 + c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.sub"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 - c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.mul"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.mul"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 * c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 
1); } - fn @"f64.div"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.div"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 / c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.min"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.min"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); if (math.isNan(c1) or math.isNan(c2)) { self.pushOperandNoCheck(f64, math.nan_f64); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (c1 == 0.0 and c2 == 0.0) { @@ -1725,16 +1715,16 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, @min(c1, c2)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.max"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.max"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); if (math.isNan(c1) or math.isNan(c2)) { self.pushOperandNoCheck(f64, math.nan_f64); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (c1 == 0.0 and c2 == 0.0) { @@ -1747,10 +1737,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, @max(c1, c2)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.copysign"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.copysign"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); @@ -1760,18 +1750,18 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, math.fabs(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.wrap_i64"(self: *VirtualMachine, ip: 
usize, code: []Rr) WasmError!void { + pub fn @"i32.wrap_i64"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i32, @as(i32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.trunc_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1783,10 +1773,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.trunc_f32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1798,10 +1788,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, @as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.trunc_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f64_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1813,10 +1803,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.trunc_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f64_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1828,26 +1818,26 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, 
@as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.extend_i32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend_i32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.extend_i32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend_i32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1859,10 +1849,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_f32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1874,10 +1864,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f64_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1889,10 +1879,10 @@ pub const 
VirtualMachine = struct { self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f64_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1904,168 +1894,168 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.convert_i32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.convert_i32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.convert_i32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.convert_i32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.convert_i64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.convert_i64_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.convert_i64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.convert_i64_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } 
- fn @"f32.demote_f64"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.demote_f64"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f32, @as(f32, @floatCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.convert_i32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.convert_i32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.convert_i64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i64_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.convert_i64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i64_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.promote_f32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.promote_f32"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f64, @as(f64, @floatCast(c1))); - return 
dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.reinterpret_f32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.reinterpret_f32"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(i32, @as(i32, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.reinterpret_f64"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.reinterpret_f64"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(i64, @as(i64, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f32.reinterpret_i32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.reinterpret_i32"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f32, @as(f32, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"f64.reinterpret_i64"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.reinterpret_i64"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f64, @as(f64, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.extend8_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.extend8_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(i32, @as(i8, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.extend16_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.extend16_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i32); 
self.pushOperandNoCheck(i32, @as(i16, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.extend8_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend8_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i8, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.extend16_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend16_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i16, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.extend32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"ref.null"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"ref.null"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { self.pushOperandNoCheck(u64, REF_NULL); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"ref.is_null"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"ref.is_null"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const value = self.popOperand(u64); if (value == REF_NULL) { @@ -2074,21 +2064,21 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, 0); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"ref.func"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const funcidx = code[ip].@"ref.func"; + pub fn @"ref.func"(self: *VirtualMachine, ip: [*]Instruction) 
WasmError!void { + const funcidx = immediate(ip, rr.@"ref.func"); const ref = self.inst.funcaddrs.items[funcidx]; // Not sure about this at all, this could still coincidentally be zero? self.pushOperandNoCheck(u64, ref); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn misc(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - return miscDispatch(self, ip, code); + pub fn misc(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + return miscDispatch(self, ip); } const misc_lookup = [18]InstructionFunction{ @@ -2096,190 +2086,190 @@ pub const VirtualMachine = struct { @"table.size", @"table.fill", }; - inline fn miscDispatch(self: *VirtualMachine, next_ip: usize, code: []Rr) WasmError!void { - const next_instr = code[next_ip].misc; + inline fn miscDispatch(self: *VirtualMachine, next_ip: [*]Instruction) WasmError!void { + // const next_instr = code[next_ip].misc; THIS IS WRONG - return try @call(.always_tail, misc_lookup[@intFromEnum(next_instr)], .{ self, next_ip, code }); + return try @call(.always_tail, next_ip, .{ self, next_ip }); } - fn @"i32.trunc_sat_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 0x7fffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f32, @floatFromInt(math.minInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 0x80000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return 
dispatch(self, ip + 1); } - fn @"i32.trunc_sat_f32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0xffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f32, @floatFromInt(math.minInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0x00000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(u32, @as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.trunc_sat_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f64_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 0x7fffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f64, @floatFromInt(math.minInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 0x80000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i32.trunc_sat_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f64_u"(self: *VirtualMachine, 
ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0xffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f64, @floatFromInt(math.minInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0x00000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(u32, @as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_sat_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f32_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i64, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x7fffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f32, @floatFromInt(math.minInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x8000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_sat_f32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f32_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u64, 0); - 
return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(u64)))) { self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0xffffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f32, @floatFromInt(math.minInt(u64)))) { self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0x0000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_sat_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f64_s"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i64, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x7fffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f64, @floatFromInt(math.minInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x8000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"i64.trunc_sat_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f64_u"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u64, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(u64)))) { 
self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0xffffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (trunc < @as(f64, @floatFromInt(math.minInt(u64)))) { self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0x0000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"memory.init"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"memory.init"; + pub fn @"memory.init"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.misc.@"memory.init"); const n = self.popOperand(u32); const src = self.popOperand(u32); @@ -2292,7 +2282,7 @@ pub const VirtualMachine = struct { if (@as(u33, src) + @as(u33, n) > data.data.len) return error.OutOfBoundsMemoryAccess; if (@as(u33, dest) + @as(u33, n) > mem_size) return error.OutOfBoundsMemoryAccess; if (n == 0) { - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } if (data.dropped) return error.OutOfBoundsMemoryAccess; @@ -2302,18 +2292,18 @@ pub const VirtualMachine = struct { try memory.write(u8, 0, dest + i, data.data[src + i]); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"data.drop"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const dataidx = code[ip].misc.@"data.drop"; + pub fn @"data.drop"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const dataidx = immediate(ip, rr.misc.@"data.drop"); const data = try self.inst.getData(dataidx); data.dropped = true; - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"memory.copy"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.copy"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const n = self.popOperand(u32); 
const src = self.popOperand(u32); const dest = self.popOperand(u32); @@ -2325,7 +2315,7 @@ pub const VirtualMachine = struct { if (@as(u33, dest) + @as(u33, n) > mem_size) return error.OutOfBoundsMemoryAccess; if (n == 0) { - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } // FIXME: move initial bounds check into Memory implementation @@ -2336,10 +2326,10 @@ pub const VirtualMachine = struct { memory.uncheckedCopyBackwards(dest, data[src .. src + n]); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"memory.fill"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.fill"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { const n = self.popOperand(u32); const value = self.popOperand(u32); const dest = self.popOperand(u32); @@ -2349,16 +2339,16 @@ pub const VirtualMachine = struct { if (@as(u33, dest) + @as(u33, n) > mem_size) return error.OutOfBoundsMemoryAccess; if (n == 0) { - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } memory.uncheckedFill(dest, n, @as(u8, @truncate(value))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"table.init"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.init"; + pub fn @"table.init"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.misc.@"table.init"); const tableidx = meta.tableidx; const elemidx = meta.elemidx; @@ -2383,20 +2373,20 @@ pub const VirtualMachine = struct { try table.set(d + i, elem.elem[s + i]); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"elem.drop"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"elem.drop"; + pub fn @"elem.drop"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.misc.@"elem.drop"); const elemidx = meta.elemidx; const elem = try 
self.inst.getElem(elemidx); elem.dropped = true; - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"table.copy"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.copy"; + pub fn @"table.copy"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.misc.@"table.copy"); const dest_tableidx = meta.dest_tableidx; const src_tableidx = meta.src_tableidx; @@ -2426,11 +2416,11 @@ pub const VirtualMachine = struct { } } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"table.grow"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.grow"; + pub fn @"table.grow"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.misc.@"table.grow"); const tableidx = meta.tableidx; const table = try self.inst.getTable(tableidx); @@ -2448,22 +2438,22 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, -1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"table.size"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.size"; + pub fn @"table.size"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.misc.@"table.size"); const tableidx = meta.tableidx; const table = try self.inst.getTable(tableidx); self.pushOperandNoCheck(u32, @as(u32, @intCast(table.size()))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } - fn @"table.fill"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.fill"; + pub fn @"table.fill"(self: *VirtualMachine, ip: [*]Instruction) WasmError!void { + const meta = immediate(ip, rr.misc.@"table.fill"); const tableidx = meta.tableidx; const table = try self.inst.getTable(tableidx); @@ -2482,7 +2472,7 @@ pub const VirtualMachine = 
struct { try table.set(d + i, ref); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1); } // https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-control-mathsf-br-l @@ -2556,11 +2546,11 @@ pub const VirtualMachine = struct { return self.op_stack[self.op_ptr - 1]; } - fn peekOperand(self: *VirtualMachine) u64 { + pub fn peekOperand(self: *VirtualMachine) u64 { return self.op_stack[self.op_ptr - 1]; } - fn peekNthOperand(self: *VirtualMachine, index: u32) u64 { + pub fn peekNthOperand(self: *VirtualMachine, index: u32) u64 { return self.op_stack[self.op_ptr - index - 1]; } @@ -2584,7 +2574,7 @@ pub const VirtualMachine = struct { return self.frame_stack[self.frame_ptr - 1]; } - fn peekFrame(self: *VirtualMachine) *Frame { + pub fn peekFrame(self: *VirtualMachine) *Frame { return &self.frame_stack[self.frame_ptr - 1]; } @@ -2606,7 +2596,7 @@ pub const VirtualMachine = struct { // // Returns nth label on the Label stack relative to the top of the stack // - fn peekNthLabel(self: *VirtualMachine, index: u32) *Label { + pub fn peekNthLabel(self: *VirtualMachine, index: u32) *Label { return &self.label_stack[self.label_ptr - index - 1]; } diff --git a/src/module.zig b/src/module.zig index d41584f7..9cba0d19 100644 --- a/src/module.zig +++ b/src/module.zig @@ -4,7 +4,10 @@ const leb = std.leb; const math = std.math; const unicode = std.unicode; const ArrayList = std.ArrayList; -const Rr = @import("rr.zig").Rr; +const VirtualMachine = @import("instance/vm.zig").VirtualMachine; +const Instruction = VirtualMachine.Instruction; +const InstructionFunction = VirtualMachine.InstructionFunction; +// const Rr = @import("rr.zig").Rr; const RrOpcode = @import("rr.zig").RrOpcode; const Instance = @import("instance.zig").Instance; const Parser = @import("module/parser.zig").Parser; @@ -34,7 +37,7 @@ pub const Module = struct { function_index_start: ?usize = null, data_count: ?u32 = null, element_init_offsets: 
ArrayList(usize), - parsed_code: ArrayList(Rr), + instructions: ArrayList(VirtualMachine.InstructionFunction), local_types: ArrayList(LocalType), br_table_indices: ArrayList(u32), references: ArrayList(u32), @@ -56,7 +59,7 @@ pub const Module = struct { .codes = Section(Code).init(alloc), .datas = Section(DataSegment).init(alloc), .element_init_offsets = ArrayList(usize).init(alloc), - .parsed_code = ArrayList(Rr).init(alloc), + .instructions = ArrayList(VirtualMachine.InstructionFunction).init(alloc), .local_types = ArrayList(LocalType).init(alloc), .br_table_indices = ArrayList(u32).init(alloc), .references = ArrayList(u32).init(alloc), @@ -77,7 +80,7 @@ pub const Module = struct { self.datas.deinit(); self.element_init_offsets.deinit(); - self.parsed_code.deinit(); + self.instructions.deinit(); self.local_types.deinit(); self.br_table_indices.deinit(); self.references.deinit(); @@ -96,7 +99,7 @@ pub const Module = struct { // Push an initial return instruction so we don't have to // track the end of a function to use its return on invoke // See https://github.com/malcolmstill/zware/pull/133 - try self.parsed_code.append(.@"return"); + try self.instructions.append(@as(InstructionFunction, @ptrCast(&VirtualMachine.@"return"))); var i: usize = 0; while (true) : (i += 1) { @@ -474,9 +477,9 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + const init_offset = self.instructions.items.len; + try self.instructions.append(VirtualMachine.@"ref.func"); + try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -505,9 +508,9 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + 
const init_offset = self.instructions.items.len; + try self.instructions.append(VirtualMachine.@"ref.func"); + try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -538,9 +541,9 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + const init_offset = self.instructions.items.len; + try self.instructions.append(VirtualMachine.@"ref.func"); + try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -568,9 +571,9 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + const init_offset = self.instructions.items.len; + try self.instructions.append(VirtualMachine.@"ref.func"); + try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -615,7 +618,7 @@ pub const Module = struct { var j: usize = 0; while (j < expr_count) : (j += 1) { - const init_offset = self.parsed_code.items.len; + const init_offset = self.instructions.items.len; _ = try self.readConstantExpression(.FuncRef); try self.element_init_offsets.append(init_offset); } @@ -662,7 +665,7 @@ pub const Module = struct { const count = try self.readULEB128(u32); self.codes.count = count; - try self.parsed_code.ensureTotalCapacity(count * 32); + try self.instructions.ensureTotalCapacity(count * 32); if (count == 0) return; diff --git a/src/module/parser.zig b/src/module/parser.zig index 0c8f8d24..8aa76f2f 100644 --- a/src/module/parser.zig +++ b/src/module/parser.zig @@ -12,29 +12,38 @@ const ValType = @import("../valtype.zig").ValType; const RefType = @import("../valtype.zig").RefType; const Range = 
@import("../rr.zig").Range; const Rr = @import("../rr.zig").Rr; +const r = @import("../rr.zig"); +const RrOpcode = @import("../rr.zig").RrOpcode; const MiscRr = @import("../rr.zig").MiscRr; +const VirtualMachine = @import("../instance/vm.zig").VirtualMachine; +const Instruction = VirtualMachine.Instruction; pub const Parsed = struct { start: usize, max_depth: usize, }; +pub const Continuation = struct { + ip: [*]Instruction, + tag: RrOpcode, +}; + pub const Parser = struct { function: []const u8 = undefined, code: []const u8 = undefined, - code_ptr: usize, + code_ptr: [*]Instruction, module: *Module, validator: Validator = undefined, params: ?[]const ValType, locals: ?[]LocalType, - continuation_stack: [1024]usize = [_]usize{0} ** 1024, + continuation_stack: [1024]Continuation = undefined, continuation_stack_ptr: usize, is_constant: bool = false, scope: usize, pub fn init(module: *Module) Parser { return Parser{ - .code_ptr = module.parsed_code.items.len, + .code_ptr = @as([*]Instruction, @ptrCast(&module.instructions.items[module.instructions.items.len - 1])), .module = module, .params = null, .locals = null, @@ -53,19 +62,19 @@ pub const Parser = struct { self.function = code; self.code = code; - const code_start = self.module.parsed_code.items.len; + const code_start = self.module.instructions.items.len; try self.pushFunction(locals, funcidx); while (try self.next()) |instr| { - try self.module.parsed_code.append(instr); + try self.module.instructions.append(VirtualMachine.lookup[@intFromEnum(instr)]); } const bytes_read = self.bytesRead(); _ = try self.module.readSlice(bytes_read); // Patch last end so that it is return - self.module.parsed_code.items[self.module.parsed_code.items.len - 1] = .@"return"; + self.module.instructions.items[self.module.instructions.items.len - 1] = VirtualMachine.@"return"; return Parsed{ .start = code_start, .max_depth = self.validator.max_depth }; } @@ -76,7 +85,7 @@ pub const Parser = struct { self.function = code; self.code = 
code; - const code_start = self.module.parsed_code.items.len; + const code_start = self.module.instructions.items.len; const in: [0]ValType = [_]ValType{} ** 0; const out: [1]ValType = [_]ValType{valtype} ** 1; @@ -100,14 +109,14 @@ pub const Parser = struct { => {}, else => return error.ValidatorConstantExpressionRequired, } - try self.module.parsed_code.append(instr); + try self.module.instructions.append(VirtualMachine.lookup[@intFromEnum(instr)]); } const bytes_read = self.bytesRead(); _ = try self.module.readSlice(bytes_read); // Patch last end so that it is return - self.module.parsed_code.items[self.module.parsed_code.items.len - 1] = .@"return"; + self.module.instructions.items[self.module.instructions.items.len - 1] = VirtualMachine.@"return"; return Parsed{ .start = code_start, .max_depth = self.validator.max_depth }; } @@ -127,19 +136,19 @@ pub const Parser = struct { ); } - fn pushContinuationStack(self: *Parser, offset: usize) !void { + fn pushContinuationStack(self: *Parser, ip: [*]Instruction, tag: RrOpcode) !void { defer self.continuation_stack_ptr += 1; if (self.continuation_stack_ptr >= self.continuation_stack.len) return error.ContinuationStackOverflow; - self.continuation_stack[self.continuation_stack_ptr] = offset; + self.continuation_stack[self.continuation_stack_ptr] = .{ .ip = ip, .tag = tag }; } - fn peekContinuationStack(self: *Parser) !usize { + fn peekContinuationStack(self: *Parser) !Continuation { if (self.continuation_stack_ptr <= 0) return error.ContinuationStackUnderflow; // No test covering this return self.continuation_stack[self.continuation_stack_ptr - 1]; } - fn popContinuationStack(self: *Parser) !usize { + fn popContinuationStack(self: *Parser) !Continuation { if (self.continuation_stack_ptr <= 0) return error.ContinuationStackUnderflow; self.continuation_stack_ptr -= 1; @@ -195,14 +204,14 @@ pub const Parser = struct { } } - try self.pushContinuationStack(self.code_ptr); + try self.pushContinuationStack(self.code_ptr, 
.block); self.scope += 1; rr = Rr{ .block = .{ .param_arity = block_params, .return_arity = block_returns, - .branch_target = 0, + .branch_target = @as([*]Instruction, @ptrCast(&self.module.instructions.items[0])), }, }; }, @@ -233,14 +242,14 @@ pub const Parser = struct { } } - try self.pushContinuationStack(self.code_ptr); + try self.pushContinuationStack(self.code_ptr, .loop); self.scope += 1; rr = Rr{ .loop = .{ .param_arity = block_params, .return_arity = block_params, - .branch_target = math.cast(u32, self.code_ptr) orelse return error.FailedCast, + .branch_target = self.code_ptr, }, }; }, @@ -276,30 +285,32 @@ pub const Parser = struct { } } - try self.pushContinuationStack(self.code_ptr); + // Assume we'll have an if with no else, if we later later parse a corresponding else + // we'll patch this + try self.pushContinuationStack(self.code_ptr, .if_no_else); self.scope += 1; rr = Rr{ .if_no_else = .{ .param_arity = block_params, .return_arity = block_returns, - .branch_target = 0, + .branch_target = @as([*]Instruction, @ptrCast(&self.module.instructions.items[0])), }, }; }, .@"else" => { - const parsed_code_offset = try self.peekContinuationStack(); - - switch (self.module.parsed_code.items[parsed_code_offset]) { - .if_no_else => |*b| { - self.module.parsed_code.items[parsed_code_offset] = Rr{ - .@"if" = .{ - .param_arity = b.param_arity, - .return_arity = b.return_arity, - .branch_target = 0, - .else_ip = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast, - }, - }; + const continuation = try self.peekContinuationStack(); + + switch (continuation.tag) { + .if_no_else => { + const b = @as(r.if_no_else, @bitCast(continuation.ip[1])); + continuation.ip[1] = @as(Instruction, @bitCast(r.@"if"{ + .param_arity = b.param_arity, + .return_arity = b.return_arity, + .branch_target = @as([*]Instruction, @ptrCast(&self.module.instructions.items[0])), + .else_ip = self.code_ptr + 1, + })); + continuation.ip.* = VirtualMachine.@"if"; }, else => return 
error.UnexpectedInstruction, } @@ -313,16 +324,16 @@ const parsed_code_offset = try self.popContinuationStack(); switch (self.module.parsed_code.items[parsed_code_offset]) { - .block => |*b| b.branch_target = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast, + .block => |*b| b.branch_target = self.code_ptr + 1, .loop => {}, .@"if" => |*b| { - b.branch_target = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast; + b.branch_target = self.code_ptr + 1; }, .if_no_else => |*b| { // We have an if with no else, check that this works arity-wise and replace with fast if if (b.param_arity -% b.return_arity != 0) return error.ValidatorElseBranchExpected; - b.branch_target = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast; + b.branch_target = self.code_ptr + 1; }, else => return error.UnexpectedInstruction, } diff --git a/src/rr.zig b/src/rr.zig index cf62a9fc..64985ffc 100644 --- a/src/rr.zig +++ b/src/rr.zig @@ -1,5 +1,6 @@ const MiscOpcode = @import("opcode.zig").MiscOpcode; const RefType = @import("valtype.zig").RefType; +const Instruction = @import("instance/vm.zig").VirtualMachine.Instruction; pub const RrOpcode = enum(u8) { @"unreachable" = 0x0, @@ -51,10 +52,10 @@ pub const RrOpcode = enum(u8) { @"i64.store32" = 0x3e, @"memory.size" = 0x3f, @"memory.grow" = 0x40, - @"i32.const" = 0x41, - @"i64.const" = 0x42, - @"f32.const" = 0x43, - @"f64.const" = 0x44, + @"i32.const" = 0x41, + @"i64.const" = 0x42, + @"f32.const" = 0x43, + @"f64.const" = 0x44, @"i32.eqz" = 0x45, @"i32.eq" = 0x46, @"i32.ne" = 0x47, @@ -189,334 +190,601 @@ pub const RrOpcode = enum(u8) { misc = 0xfc, }; +pub fn nextIp(comptime Type: type) comptime_int { + const word_size_bits = 8 * @sizeOf(usize); + const bits = @bitSizeOf(Type); + const round_up_bits = if (bits % word_size_bits == 0) 0 else word_size_bits - (bits % word_size_bits); + return ((bits + round_up_bits) / word_size_bits) + 1; +} + +// fn nextIp(comptime 
Type: type) comptime_int { +// const word_size_bits = 8 * @sizeOf(usize); + +// comptime var bits = switch (@typeInfo(Type)) { +// .Struct => |info| blk: { +// comptime var i = 0; +// inline for (info.fields) |field| { +// switch (@typeInfo(field.type)) { +// .Int => |int| i += int.bits, +// else => @compileError("Expected int"), +// } +// } +// break :blk i; +// }, +// .Void => 0, +// .Int => |int| int.bits, +// else => @compileError("Unexpected type"), +// }; + +// const round_up_bits = if (bits % word_size_bits == 0) 0 else word_size_bits - (bits % word_size_bits); + +// return ((bits + round_up_bits) / word_size_bits) + 1; +// } + +pub fn meta(ip: [*]Instruction, comptime Type: type) Type { + const start = ip + 1; + + return @as(*const Type, @ptrCast(start)).*; +} + +// fn getFieldType(comptime Type: type, field: []const u8) type { +// switch (@typeInfo(Type)) { +// .Struct => |info| blk: { +// comptime var i = 0; +// inline for (info.fields) |field| { +// switch (@typeInfo(field.type)) { +// .Int => |int| i += int.bits, +// else => @compileError("Expected int"), +// } +// } +// break :blk i; +// }, +// .Void => 0, +// .Int => |int| int.type, +// else => @compileError("Unexpected type"), +// }; +// } + +// fn immediate(comptime Struct: type, comptime field: []const u8) getFieldType(Struct, field) { +// const struct_info = switch (@typeInfo(Type)) { +// .Struct => |info| info, +// else => @compileError("Expected struct"), +// }; + +// comptime var i = 0; +// inline for (struct_info.fields) |field| { +// switch (@typeInfo(field.type)) { +// .Int => |int| i += int.bits, +// else => @compileError("Expected int"), +// } +// } +// } + +test { + const std = @import("std"); + const testing = std.testing; + + try testing.expectEqual(1, nextIp(@"unreachable")); + try testing.expectEqual(1, nextIp(nop)); + try testing.expectEqual(2, nextIp(block)); + try testing.expectEqual(2, nextIp(loop)); + try testing.expectEqual(3, nextIp(@"if")); + try 
testing.expectEqual(2, nextIp(if_no_else)); + try testing.expectEqual(3, nextIp(fast_call)); + try testing.expectEqual(2, nextIp(br)); +} + pub const Rr = union(RrOpcode) { - @"unreachable": void, - nop: void, - block: struct { - param_arity: u16, - return_arity: u16, - branch_target: u32, - }, - loop: struct { - param_arity: u16, - return_arity: u16, - branch_target: u32, - }, - @"if": struct { - param_arity: u16, - return_arity: u16, - branch_target: u32, - else_ip: u32, - }, - @"else": void, - if_no_else: struct { - param_arity: u16, - return_arity: u16, - branch_target: u32, - }, - end: void, - br: u32, - br_if: u32, - br_table: struct { - ls: Range, - ln: u32, - }, - @"return": void, - call: usize, // u32? - call_indirect: struct { - typeidx: u32, - tableidx: u32, - }, - fast_call: struct { - start: u32, - locals: u16, - params: u16, - results: u16, - required_stack_space: u16, - }, - drop: void, - select: void, - @"local.get": u32, - @"local.set": u32, - @"local.tee": u32, - @"global.get": u32, - @"global.set": u32, - @"table.get": u32, // tableidx - @"table.set": u32, // tableidx - @"i32.load": struct { - alignment: u32, - offset: u32, - }, - @"i64.load": struct { - alignment: u32, - offset: u32, - }, - @"f32.load": struct { - alignment: u32, - offset: u32, - }, - @"f64.load": struct { - alignment: u32, - offset: u32, - }, - @"i32.load8_s": struct { - alignment: u32, - offset: u32, - }, - @"i32.load8_u": struct { - alignment: u32, - offset: u32, - }, - @"i32.load16_s": struct { - alignment: u32, - offset: u32, - }, - @"i32.load16_u": struct { - alignment: u32, - offset: u32, - }, - @"i64.load8_s": struct { - alignment: u32, - offset: u32, - }, - @"i64.load8_u": struct { - alignment: u32, - offset: u32, - }, - @"i64.load16_s": struct { - alignment: u32, - offset: u32, - }, - @"i64.load16_u": struct { - alignment: u32, - offset: u32, - }, - @"i64.load32_s": struct { - alignment: u32, - offset: u32, - }, - @"i64.load32_u": struct { - alignment: u32, - offset: 
u32, - }, - @"i32.store": struct { - alignment: u32, - offset: u32, - }, - @"i64.store": struct { - alignment: u32, - offset: u32, - }, - @"f32.store": struct { - alignment: u32, - offset: u32, - }, - @"f64.store": struct { - alignment: u32, - offset: u32, - }, - @"i32.store8": struct { - alignment: u32, - offset: u32, - }, - @"i32.store16": struct { - alignment: u32, - offset: u32, - }, - @"i64.store8": struct { - alignment: u32, - offset: u32, - }, - @"i64.store16": struct { - alignment: u32, - offset: u32, - }, - @"i64.store32": struct { - alignment: u32, - offset: u32, - }, - @"memory.size": u32, - @"memory.grow": u32, - @"i32.const": i32, - @"i64.const": i64, - @"f32.const": f32, - @"f64.const": f64, - @"i32.eqz": void, - @"i32.eq": void, - @"i32.ne": void, - @"i32.lt_s": void, - @"i32.lt_u": void, - @"i32.gt_s": void, - @"i32.gt_u": void, - @"i32.le_s": void, - @"i32.le_u": void, - @"i32.ge_s": void, - @"i32.ge_u": void, - @"i64.eqz": void, - @"i64.eq": void, - @"i64.ne": void, - @"i64.lt_s": void, - @"i64.lt_u": void, - @"i64.gt_s": void, - @"i64.gt_u": void, - @"i64.le_s": void, - @"i64.le_u": void, - @"i64.ge_s": void, - @"i64.ge_u": void, - @"f32.eq": void, - @"f32.ne": void, - @"f32.lt": void, - @"f32.gt": void, - @"f32.le": void, - @"f32.ge": void, - @"f64.eq": void, - @"f64.ne": void, - @"f64.lt": void, - @"f64.gt": void, - @"f64.le": void, - @"f64.ge": void, - @"i32.clz": void, - @"i32.ctz": void, - @"i32.popcnt": void, - @"i32.add": void, - @"i32.sub": void, - @"i32.mul": void, - @"i32.div_s": void, - @"i32.div_u": void, - @"i32.rem_s": void, - @"i32.rem_u": void, - @"i32.and": void, - @"i32.or": void, - @"i32.xor": void, - @"i32.shl": void, - @"i32.shr_s": void, - @"i32.shr_u": void, - @"i32.rotl": void, - @"i32.rotr": void, - @"i64.clz": void, - @"i64.ctz": void, - @"i64.popcnt": void, - @"i64.add": void, - @"i64.sub": void, - @"i64.mul": void, - @"i64.div_s": void, - @"i64.div_u": void, - @"i64.rem_s": void, - @"i64.rem_u": void, - @"i64.and": 
void, - @"i64.or": void, - @"i64.xor": void, - @"i64.shl": void, - @"i64.shr_s": void, - @"i64.shr_u": void, - @"i64.rotl": void, - @"i64.rotr": void, - @"f32.abs": void, - @"f32.neg": void, - @"f32.ceil": void, - @"f32.floor": void, - @"f32.trunc": void, - @"f32.nearest": void, - @"f32.sqrt": void, - @"f32.add": void, - @"f32.sub": void, - @"f32.mul": void, - @"f32.div": void, - @"f32.min": void, - @"f32.max": void, - @"f32.copysign": void, - @"f64.abs": void, - @"f64.neg": void, - @"f64.ceil": void, - @"f64.floor": void, - @"f64.trunc": void, - @"f64.nearest": void, - @"f64.sqrt": void, - @"f64.add": void, - @"f64.sub": void, - @"f64.mul": void, - @"f64.div": void, - @"f64.min": void, - @"f64.max": void, - @"f64.copysign": void, - @"i32.wrap_i64": void, - @"i32.trunc_f32_s": void, - @"i32.trunc_f32_u": void, - @"i32.trunc_f64_s": void, - @"i32.trunc_f64_u": void, - @"i64.extend_i32_s": void, - @"i64.extend_i32_u": void, - @"i64.trunc_f32_s": void, - @"i64.trunc_f32_u": void, - @"i64.trunc_f64_s": void, - @"i64.trunc_f64_u": void, - @"f32.convert_i32_s": void, - @"f32.convert_i32_u": void, - @"f32.convert_i64_s": void, - @"f32.convert_i64_u": void, - @"f32.demote_f64": void, - @"f64.convert_i32_s": void, - @"f64.convert_i32_u": void, - @"f64.convert_i64_s": void, - @"f64.convert_i64_u": void, - @"f64.promote_f32": void, - @"i32.reinterpret_f32": void, - @"i64.reinterpret_f64": void, - @"f32.reinterpret_i32": void, - @"f64.reinterpret_i64": void, - @"i32.extend8_s": void, - @"i32.extend16_s": void, - @"i64.extend8_s": void, - @"i64.extend16_s": void, - @"i64.extend32_s": void, - @"ref.null": RefType, - @"ref.is_null": void, - @"ref.func": u32, - misc: MiscRr, + @"unreachable": @"unreachable", + nop: nop, + block: block, + loop: loop, + @"if": @"if", + @"else": @"else", + if_no_else: if_no_else, + end: end, + br: br, + br_if: br_if, + br_table: br_table, + @"return": @"return", + call: call, + call_indirect: call_indirect, + fast_call: fast_call, + drop: drop, + 
select: select, + @"local.get": @"local.get", + @"local.set": @"local.set", + @"local.tee": @"local.tee", + @"global.get": @"global.get", + @"global.set": @"global.set", + @"table.get": @"table.get", + @"table.set": @"table.set", + @"i32.load": @"i32.load", + @"i64.load": @"i64.load", + @"f32.load": @"f32.load", + @"f64.load": @"f64.load", + @"i32.load8_s": @"i32.load8_s", + @"i32.load8_u": @"i32.load8_u", + @"i32.load16_s": @"i32.load16_s", + @"i32.load16_u": @"i32.load16_u", + @"i64.load8_s": @"i64.load8_s", + @"i64.load8_u": @"i64.load8_u", + @"i64.load16_s": @"i64.load16_s", + @"i64.load16_u": @"i64.load16_u", + @"i64.load32_s": @"i64.load32_s", + @"i64.load32_u": @"i64.load32_u", + @"i32.store": @"i32.store", + @"i64.store": @"i64.store", + @"f32.store": @"f32.store", + @"f64.store": @"f64.store", + @"i32.store8": @"i32.store8", + @"i32.store16": @"i32.store16", + @"i64.store8": @"i64.store8", + @"i64.store16": @"i64.store16", + @"i64.store32": @"i64.store32", + @"memory.size": @"memory.size", + @"memory.grow": @"memory.grow", + @"i32.const": @"i32.const", + @"i64.const": @"i64.const", + @"f32.const": @"f32.const", + @"f64.const": @"f64.const", + @"i32.eqz": @"i32.eqz", + @"i32.eq": @"i32.eq", + @"i32.ne": @"i32.ne", + @"i32.lt_s": @"i32.lt_s", + @"i32.lt_u": @"i32.lt_u", + @"i32.gt_s": @"i32.gt_s", + @"i32.gt_u": @"i32.gt_u", + @"i32.le_s": @"i32.le_s", + @"i32.le_u": @"i32.le_u", + @"i32.ge_s": @"i32.ge_s", + @"i32.ge_u": @"i32.ge_u", + @"i64.eqz": @"i64.eqz", + @"i64.eq": @"i64.eq", + @"i64.ne": @"i64.ne", + @"i64.lt_s": @"i64.lt_s", + @"i64.lt_u": @"i64.lt_u", + @"i64.gt_s": @"i64.gt_s", + @"i64.gt_u": @"i64.gt_u", + @"i64.le_s": @"i64.le_s", + @"i64.le_u": @"i64.le_u", + @"i64.ge_s": @"i64.ge_s", + @"i64.ge_u": @"i64.ge_u", + @"f32.eq": @"f32.eq", + @"f32.ne": @"f32.ne", + @"f32.lt": @"f32.lt", + @"f32.gt": @"f32.gt", + @"f32.le": @"f32.le", + @"f32.ge": @"f32.ge", + @"f64.eq": @"f64.eq", + @"f64.ne": @"f64.ne", + 
@"f64.lt": @"f64.lt", + @"f64.gt": @"f64.gt", + @"f64.le": @"f64.le", + @"f64.ge": @"f64.ge", + @"i32.clz": @"i32.clz", + @"i32.ctz": @"i32.ctz", + @"i32.popcnt": @"i32.popcnt", + @"i32.add": @"i32.add", + @"i32.sub": @"i32.sub", + @"i32.mul": @"i32.mul", + @"i32.div_s": @"i32.div_s", + @"i32.div_u": @"i32.div_u", + @"i32.rem_s": @"i32.rem_s", + @"i32.rem_u": @"i32.rem_u", + @"i32.and": @"i32.and", + @"i32.or": @"i32.or", + @"i32.xor": @"i32.xor", + @"i32.shl": @"i32.shl", + @"i32.shr_s": @"i32.shr_s", + @"i32.shr_u": @"i32.shr_u", + @"i32.rotl": @"i32.rotl", + @"i32.rotr": @"i32.rotr", + @"i64.clz": @"i64.clz", + @"i64.ctz": @"i64.ctz", + @"i64.popcnt": @"i64.popcnt", + @"i64.add": @"i64.add", + @"i64.sub": @"i64.sub", + @"i64.mul": @"i64.mul", + @"i64.div_s": @"i64.div_s", + @"i64.div_u": @"i64.div_u", + @"i64.rem_s": @"i64.rem_s", + @"i64.rem_u": @"i64.rem_u", + @"i64.and": @"i64.and", + @"i64.or": @"i64.or", + @"i64.xor": @"i64.xor", + @"i64.shl": @"i64.shl", + @"i64.shr_s": @"i64.shr_s", + @"i64.shr_u": @"i64.shr_u", + @"i64.rotl": @"i64.rotl", + @"i64.rotr": @"i64.rotr", + @"f32.abs": @"f32.abs", + @"f32.neg": @"f32.neg", + @"f32.ceil": @"f32.ceil", + @"f32.floor": @"f32.floor", + @"f32.trunc": @"f32.trunc", + @"f32.nearest": @"f32.nearest", + @"f32.sqrt": @"f32.sqrt", + @"f32.add": @"f32.add", + @"f32.sub": @"f32.sub", + @"f32.mul": @"f32.mul", + @"f32.div": @"f32.div", + @"f32.min": @"f32.min", + @"f32.max": @"f32.max", + @"f32.copysign": @"f32.copysign", + @"f64.abs": @"f64.abs", + @"f64.neg": @"f64.neg", + @"f64.ceil": @"f64.ceil", + @"f64.floor": @"f64.floor", + @"f64.trunc": @"f64.trunc", + @"f64.nearest": @"f64.nearest", + @"f64.sqrt": @"f64.sqrt", + @"f64.add": @"f64.add", + @"f64.sub": @"f64.sub", + @"f64.mul": @"f64.mul", + @"f64.div": @"f64.div", + @"f64.min": @"f64.min", + @"f64.max": @"f64.max", + @"f64.copysign": @"f64.copysign", + @"i32.wrap_i64": @"i32.wrap_i64", + @"i32.trunc_f32_s": @"i32.trunc_f32_s", + @"i32.trunc_f32_u": 
@"i32.trunc_f32_u", + @"i32.trunc_f64_s": @"i32.trunc_f64_s", + @"i32.trunc_f64_u": @"i32.trunc_f64_u", + @"i64.extend_i32_s": @"i64.extend_i32_s", + @"i64.extend_i32_u": @"i64.extend_i32_u", + @"i64.trunc_f32_s": @"i64.trunc_f32_s", + @"i64.trunc_f32_u": @"i64.trunc_f32_u", + @"i64.trunc_f64_s": @"i64.trunc_f64_s", + @"i64.trunc_f64_u": @"i64.trunc_f64_u", + @"f32.convert_i32_s": @"f32.convert_i32_s", + @"f32.convert_i32_u": @"f32.convert_i32_u", + @"f32.convert_i64_s": @"f32.convert_i64_s", + @"f32.convert_i64_u": @"f32.convert_i64_u", + @"f32.demote_f64": @"f32.demote_f64", + @"f64.convert_i32_s": @"f64.convert_i32_s", + @"f64.convert_i32_u": @"f64.convert_i32_u", + @"f64.convert_i64_s": @"f64.convert_i64_s", + @"f64.convert_i64_u": @"f64.convert_i64_u", + @"f64.promote_f32": @"f64.promote_f32", + @"i32.reinterpret_f32": @"i32.reinterpret_f32", + @"i64.reinterpret_f64": @"i64.reinterpret_f64", + @"f32.reinterpret_i32": @"f32.reinterpret_i32", + @"f64.reinterpret_i64": @"f64.reinterpret_i64", + @"i32.extend8_s": @"i32.extend8_s", + @"i32.extend16_s": @"i32.extend16_s", + @"i64.extend8_s": @"i64.extend8_s", + @"i64.extend16_s": @"i64.extend16_s", + @"i64.extend32_s": @"i64.extend32_s", + @"ref.null": @"ref.null", + @"ref.is_null": @"ref.is_null", + @"ref.func": @"ref.func", + misc: void, }; -pub const MiscRr = union(MiscOpcode) { - @"i32.trunc_sat_f32_s": void, - @"i32.trunc_sat_f32_u": void, - @"i32.trunc_sat_f64_s": void, - @"i32.trunc_sat_f64_u": void, - @"i64.trunc_sat_f32_s": void, - @"i64.trunc_sat_f32_u": void, - @"i64.trunc_sat_f64_s": void, - @"i64.trunc_sat_f64_u": void, - @"memory.init": struct { - dataidx: u32, - memidx: u32, - }, - @"data.drop": u32, - @"memory.copy": struct { - src_memidx: u8, - dest_memidx: u8, - }, - @"memory.fill": u8, - @"table.init": struct { - tableidx: u32, - elemidx: u32, - }, - @"elem.drop": struct { - elemidx: u32, - }, - @"table.copy": struct { - dest_tableidx: u32, - src_tableidx: u32, - }, - @"table.grow": struct { - 
tableidx: u32, - }, - @"table.size": struct { - tableidx: u32, - }, - @"table.fill": struct { - tableidx: u32, - }, +pub const @"unreachable" = void; +pub const nop = void; +pub const block = packed struct { + branch_target: [*]Instruction, + param_arity: u16, + return_arity: u16, +}; +pub const loop = packed struct { + branch_target: [*]Instruction, + param_arity: u16, + return_arity: u16, +}; +pub const @"if" = packed struct { + branch_target: [*]Instruction, + else_ip: [*]Instruction, + param_arity: u16, + return_arity: u16, +}; +pub const @"else" = void; +pub const if_no_else = packed struct { + branch_target: [*]Instruction, + param_arity: u16, + return_arity: u16, +}; +pub const end = void; +pub const br = u32; +pub const br_if = u32; +pub const br_table = packed struct { + ls: Range, + ln: u32, +}; +pub const @"return" = void; +pub const call = *Instruction; +pub const call_indirect = packed struct { + typeidx: u32, + tableidx: u32, +}; +pub const fast_call = packed struct { + start: [*]Instruction, + locals: u16, + params: u16, + results: u16, + required_stack_space: u16, +}; +pub const drop = void; +pub const select = void; +pub const @"local.get" = u32; +pub const @"local.set" = u32; +pub const @"local.tee" = u32; +pub const @"global.get" = u32; +pub const @"global.set" = u32; +pub const @"table.get" = u32; // tableidx +pub const @"table.set" = u32; // tableidx +pub const @"i32.load" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.load" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"f32.load" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"f64.load" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i32.load8_s" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i32.load8_u" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i32.load16_s" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i32.load16_u" = packed struct { + 
alignment: u32, + offset: u32, +}; +pub const @"i64.load8_s" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.load8_u" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.load16_s" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.load16_u" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.load32_s" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.load32_u" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i32.store" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.store" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"f32.store" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"f64.store" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i32.store8" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i32.store16" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.store8" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.store16" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"i64.store32" = packed struct { + alignment: u32, + offset: u32, +}; +pub const @"memory.size" = u32; +pub const @"memory.grow" = u32; +pub const @"i32.const" = i32; +pub const @"i64.const" = i64; +pub const @"f32.const" = f32; +pub const @"f64.const" = f64; +pub const @"i32.eqz" = void; +pub const @"i32.eq" = void; +pub const @"i32.ne" = void; +pub const @"i32.lt_s" = void; +pub const @"i32.lt_u" = void; +pub const @"i32.gt_s" = void; +pub const @"i32.gt_u" = void; +pub const @"i32.le_s" = void; +pub const @"i32.le_u" = void; +pub const @"i32.ge_s" = void; +pub const @"i32.ge_u" = void; +pub const @"i64.eqz" = void; +pub const @"i64.eq" = void; +pub const @"i64.ne" = void; +pub const @"i64.lt_s" = void; +pub const @"i64.lt_u" = void; +pub const @"i64.gt_s" = void; +pub const @"i64.gt_u" = void; 
+pub const @"i64.le_s" = void; +pub const @"i64.le_u" = void; +pub const @"i64.ge_s" = void; +pub const @"i64.ge_u" = void; +pub const @"f32.eq" = void; +pub const @"f32.ne" = void; +pub const @"f32.lt" = void; +pub const @"f32.gt" = void; +pub const @"f32.le" = void; +pub const @"f32.ge" = void; +pub const @"f64.eq" = void; +pub const @"f64.ne" = void; +pub const @"f64.lt" = void; +pub const @"f64.gt" = void; +pub const @"f64.le" = void; +pub const @"f64.ge" = void; +pub const @"i32.clz" = void; +pub const @"i32.ctz" = void; +pub const @"i32.popcnt" = void; +pub const @"i32.add" = void; +pub const @"i32.sub" = void; +pub const @"i32.mul" = void; +pub const @"i32.div_s" = void; +pub const @"i32.div_u" = void; +pub const @"i32.rem_s" = void; +pub const @"i32.rem_u" = void; +pub const @"i32.and" = void; +pub const @"i32.or" = void; +pub const @"i32.xor" = void; +pub const @"i32.shl" = void; +pub const @"i32.shr_s" = void; +pub const @"i32.shr_u" = void; +pub const @"i32.rotl" = void; +pub const @"i32.rotr" = void; +pub const @"i64.clz" = void; +pub const @"i64.ctz" = void; +pub const @"i64.popcnt" = void; +pub const @"i64.add" = void; +pub const @"i64.sub" = void; +pub const @"i64.mul" = void; +pub const @"i64.div_s" = void; +pub const @"i64.div_u" = void; +pub const @"i64.rem_s" = void; +pub const @"i64.rem_u" = void; +pub const @"i64.and" = void; +pub const @"i64.or" = void; +pub const @"i64.xor" = void; +pub const @"i64.shl" = void; +pub const @"i64.shr_s" = void; +pub const @"i64.shr_u" = void; +pub const @"i64.rotl" = void; +pub const @"i64.rotr" = void; +pub const @"f32.abs" = void; +pub const @"f32.neg" = void; +pub const @"f32.ceil" = void; +pub const @"f32.floor" = void; +pub const @"f32.trunc" = void; +pub const @"f32.nearest" = void; +pub const @"f32.sqrt" = void; +pub const @"f32.add" = void; +pub const @"f32.sub" = void; +pub const @"f32.mul" = void; +pub const @"f32.div" = void; +pub const @"f32.min" = void; +pub const @"f32.max" = void; +pub const 
@"f32.copysign" = void; +pub const @"f64.abs" = void; +pub const @"f64.neg" = void; +pub const @"f64.ceil" = void; +pub const @"f64.floor" = void; +pub const @"f64.trunc" = void; +pub const @"f64.nearest" = void; +pub const @"f64.sqrt" = void; +pub const @"f64.add" = void; +pub const @"f64.sub" = void; +pub const @"f64.mul" = void; +pub const @"f64.div" = void; +pub const @"f64.min" = void; +pub const @"f64.max" = void; +pub const @"f64.copysign" = void; +pub const @"i32.wrap_i64" = void; +pub const @"i32.trunc_f32_s" = void; +pub const @"i32.trunc_f32_u" = void; +pub const @"i32.trunc_f64_s" = void; +pub const @"i32.trunc_f64_u" = void; +pub const @"i64.extend_i32_s" = void; +pub const @"i64.extend_i32_u" = void; +pub const @"i64.trunc_f32_s" = void; +pub const @"i64.trunc_f32_u" = void; +pub const @"i64.trunc_f64_s" = void; +pub const @"i64.trunc_f64_u" = void; +pub const @"f32.convert_i32_s" = void; +pub const @"f32.convert_i32_u" = void; +pub const @"f32.convert_i64_s" = void; +pub const @"f32.convert_i64_u" = void; +pub const @"f32.demote_f64" = void; +pub const @"f64.convert_i32_s" = void; +pub const @"f64.convert_i32_u" = void; +pub const @"f64.convert_i64_s" = void; +pub const @"f64.convert_i64_u" = void; +pub const @"f64.promote_f32" = void; +pub const @"i32.reinterpret_f32" = void; +pub const @"i64.reinterpret_f64" = void; +pub const @"f32.reinterpret_i32" = void; +pub const @"f64.reinterpret_i64" = void; +pub const @"i32.extend8_s" = void; +pub const @"i32.extend16_s" = void; +pub const @"i64.extend8_s" = void; +pub const @"i64.extend16_s" = void; +pub const @"i64.extend32_s" = void; +pub const @"ref.null" = RefType; +pub const @"ref.is_null" = void; +pub const @"ref.func" = u32; +pub const misc = void; +pub const @"i32.trunc_sat_f32_s" = void; +pub const @"i32.trunc_sat_f32_u" = void; +pub const @"i32.trunc_sat_f64_s" = void; +pub const @"i32.trunc_sat_f64_u" = void; +pub const @"i64.trunc_sat_f32_s" = void; +pub const @"i64.trunc_sat_f32_u" = void; 
+pub const @"i64.trunc_sat_f64_s" = void; +pub const @"i64.trunc_sat_f64_u" = void; +pub const @"memory.init" = packed struct { + dataidx: u32, + memidx: u32, +}; +pub const @"data.drop" = u32; +pub const @"memory.copy" = packed struct { + src_memidx: u8, + dest_memidx: u8, +}; +pub const @"memory.fill" = u8; +pub const @"table.init" = packed struct { + tableidx: u32, + elemidx: u32, +}; +pub const @"elem.drop" = packed struct { + elemidx: u32, +}; +pub const @"table.copy" = packed struct { + dest_tableidx: u32, + src_tableidx: u32, +}; +pub const @"table.grow" = packed struct { + tableidx: u32, +}; +pub const @"table.size" = packed struct { + tableidx: u32, +}; +pub const @"table.fill" = packed struct { + tableidx: u32, }; -pub const Range = struct { +pub const Range = packed struct { offset: usize = 0, count: usize = 0, }; diff --git a/src/store/elem.zig b/src/store/elem.zig index 071a22a2..71ed4331 100644 --- a/src/store/elem.zig +++ b/src/store/elem.zig @@ -3,7 +3,7 @@ const mem = std.mem; const RefType = @import("../valtype.zig").RefType; pub const Elem = struct { - @"type": RefType, + type: RefType, elem: []u32, alloc: mem.Allocator, dropped: bool = false, @@ -12,7 +12,7 @@ pub const Elem = struct { const elem = try alloc.alloc(u32, count); return Elem{ - .@"type" = reftype, + .type = reftype, .elem = elem, .alloc = alloc, }; diff --git a/src/store/memory.zig b/src/store/memory.zig index 0895e430..984fefe1 100644 --- a/src/store/memory.zig +++ b/src/store/memory.zig @@ -55,7 +55,7 @@ pub const Memory = struct { mem.copy(u8, self.data.items[address .. address + data.len], data); } - pub fn uncheckedFill(self: *Memory, dst_address: u32, n: u32, value: u8) void { + pub fn uncheckedFill(self: *Memory, dst_address: u32, n: u32, value: u8) void { @memset(self.data.items[dst_address .. 
dst_address + n], value); } @@ -76,7 +76,6 @@ pub const Memory = struct { const effective_address = @as(u33, offset) + @as(u33, address); if (effective_address + @sizeOf(T) - 1 >= self.data.items.len) return error.OutOfBoundsMemoryAccess; - switch (T) { u8, u16,