diff --git a/winch/codegen/src/abi/mod.rs b/winch/codegen/src/abi/mod.rs
index 79e5229198f4..f7f37b301fb8 100644
--- a/winch/codegen/src/abi/mod.rs
+++ b/winch/codegen/src/abi/mod.rs
@@ -56,20 +56,20 @@ pub(crate) use local::*;
 /// specific registers, etc.
 pub(crate) trait ABI {
     /// The required stack alignment.
-    fn stack_align(&self) -> u8;
+    fn stack_align() -> u8;
 
     /// The required stack alignment for calls.
-    fn call_stack_align(&self) -> u8;
+    fn call_stack_align() -> u8;
 
     /// The offset to the argument base, relative to the frame pointer.
-    fn arg_base_offset(&self) -> u8;
+    fn arg_base_offset() -> u8;
 
     /// The offset to the return address, relative to the frame pointer.
     fn ret_addr_offset() -> u8;
 
     /// Construct the ABI-specific signature from a WebAssembly
     /// function type.
-    fn sig(&self, wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig;
+    fn sig(wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig;
 
     /// Returns the number of bits in a word.
     fn word_bits() -> u32;
diff --git a/winch/codegen/src/codegen/call.rs b/winch/codegen/src/codegen/call.rs
index c1eae0b65545..6c04c8dfada3 100644
--- a/winch/codegen/src/codegen/call.rs
+++ b/winch/codegen/src/codegen/call.rs
@@ -84,7 +84,7 @@ impl<'a> FnCall<'a> {
     /// want to calculate any adjustments to the caller's frame, after
     /// having saved any live registers, so that we can account for
     /// any pushes generated by register spilling.
-    pub fn new<A: ABI, M: MacroAssembler>(
+    pub fn new<M: MacroAssembler>(
         callee_sig: &'a ABISig,
         context: &mut CodeGenContext,
         masm: &mut M,
@@ -135,52 +135,43 @@ impl<'a> FnCall<'a> {
         Self {
             abi_sig: &callee_sig,
             arg_stack_space,
-            call_stack_space: (spilled_regs * <A as ABI>::word_bytes())
-                + (memory_values * <A as ABI>::word_bytes()),
+            call_stack_space: (spilled_regs * <M::ABI as ABI>::word_bytes())
+                + (memory_values * <M::ABI as ABI>::word_bytes()),
             sp_offset_at_callsite,
         }
     }
 
     /// Emit a direct function call, to a locally defined function.
-    pub fn direct<M: MacroAssembler, A: ABI>(
+    pub fn direct<M: MacroAssembler>(
         &self,
         masm: &mut M,
         context: &mut CodeGenContext,
         callee: FuncIndex,
-        alignment: u32,
-        addend: u32,
     ) {
-        let reserved_stack = masm.call(alignment, addend, self.arg_stack_space, |masm| {
-            self.assign_args(context, masm, <A as ABI>::scratch_reg());
+        let reserved_stack = masm.call(self.arg_stack_space, |masm| {
+            self.assign_args(context, masm, <M::ABI as ABI>::scratch_reg());
             CalleeKind::Direct(callee.as_u32())
         });
-        self.post_call::<M, A>(masm, context, reserved_stack);
+        self.post_call::<M>(masm, context, reserved_stack);
     }
 
     /// Emit an indirect function call, using a raw address.
-    pub fn indirect<M: MacroAssembler, A: ABI>(
+    pub fn indirect<M: MacroAssembler>(
         &self,
         masm: &mut M,
         context: &mut CodeGenContext,
         addr: M::Address,
-        alignment: u32,
-        addend: u32,
     ) {
-        let reserved_stack = masm.call(alignment, addend, self.arg_stack_space, |masm| {
-            let scratch = <A as ABI>::scratch_reg();
+        let reserved_stack = masm.call(self.arg_stack_space, |masm| {
+            let scratch = <M::ABI as ABI>::scratch_reg();
             self.assign_args(context, masm, scratch);
             masm.load(addr, scratch, OperandSize::S64);
             CalleeKind::Indirect(scratch)
         });
-        self.post_call::<M, A>(masm, context, reserved_stack);
+        self.post_call::<M>(masm, context, reserved_stack);
     }
 
-    fn post_call<M: MacroAssembler, A: ABI>(
-        &self,
-        masm: &mut M,
-        context: &mut CodeGenContext,
-        size: u32,
-    ) {
+    fn post_call<M: MacroAssembler>(&self, masm: &mut M, context: &mut CodeGenContext, size: u32) {
         masm.free_stack(self.call_stack_space + size);
         context.drop_last(self.abi_sig.params.len());
         // The stack pointer at the end of the function call
diff --git a/winch/codegen/src/codegen/mod.rs b/winch/codegen/src/codegen/mod.rs
index 9cb32946b71c..d414a89681ae 100644
--- a/winch/codegen/src/codegen/mod.rs
+++ b/winch/codegen/src/codegen/mod.rs
@@ -9,7 +9,7 @@ use call::FnCall;
 use wasmparser::{
     BinaryReader, FuncType, FuncValidator, ValType, ValidatorResources, VisitOperator,
 };
-use wasmtime_environ::{FuncIndex, PtrSize};
+use wasmtime_environ::FuncIndex;
 
 mod context;
 pub(crate) use context::*;
@@ -18,11 +18,9 @@ pub use env::*;
 pub mod call;
 
 /// The code generation abstraction.
-pub(crate) struct CodeGen<'a, A, M, P>
+pub(crate) struct CodeGen<'a, M>
 where
     M: MacroAssembler,
-    A: ABI,
-    P: PtrSize,
 {
     /// The ABI-specific representation of the function signature, excluding results.
     sig: ABISig,
@@ -31,33 +29,26 @@ where
     pub context: CodeGenContext<'a>,
 
     /// A reference to the function compilation environment.
-    pub env: FuncEnv<'a, P>,
+    pub env: FuncEnv<'a, M::Ptr>,
 
     /// The MacroAssembler.
     pub masm: &'a mut M,
-
-    /// A reference to the current ABI.
-    pub abi: &'a A,
 }
 
-impl<'a, A, M, P> CodeGen<'a, A, M, P>
+impl<'a, M> CodeGen<'a, M>
 where
     M: MacroAssembler,
-    A: ABI,
-    P: PtrSize,
 {
     pub fn new(
         masm: &'a mut M,
-        abi: &'a A,
         context: CodeGenContext<'a>,
-        env: FuncEnv<'a, P>,
+        env: FuncEnv<'a, M::Ptr>,
         sig: ABISig,
     ) -> Self {
         Self {
             sig,
             context,
             masm,
-            abi,
             env,
         }
     }
@@ -89,17 +80,17 @@ where
     ) -> Result<()> {
         self.spill_register_arguments();
         let defined_locals_range = &self.context.frame.defined_locals_range;
-        self.masm.zero_mem_range(
-            defined_locals_range.as_range(),
-            <A as ABI>::word_bytes(),
-            &mut self.context.regalloc,
-        );
+        self.masm
+            .zero_mem_range(defined_locals_range.as_range(), &mut self.context.regalloc);
 
         // Save the vmctx pointer to its local slot in case we need to reload it
         // at any point.
         let vmctx_addr = self.masm.local_address(&self.context.frame.vmctx_slot);
-        self.masm
-            .store(<A as ABI>::vmctx_reg().into(), vmctx_addr, OperandSize::S64);
+        self.masm.store(
+            <M::ABI as ABI>::vmctx_reg().into(),
+            vmctx_addr,
+            OperandSize::S64,
+        );
 
         while !body.eof() {
             let offset = body.original_position();
@@ -141,7 +132,7 @@ where
             params.extend_from_slice(&callee.ty.params());
             let sig = FuncType::new(params, callee.ty.results().to_owned());
 
-            let caller_vmctx = <A as ABI>::vmctx_reg();
+            let caller_vmctx = <M::ABI as ABI>::vmctx_reg();
             let callee_vmctx = self.context.any_gpr(self.masm);
             let callee_vmctx_offset = self.env.vmoffsets.vmctx_vmfunction_import_vmctx(index);
             let callee_vmctx_addr = self.masm.address_at_reg(caller_vmctx, callee_vmctx_offset);
@@ -161,32 +152,21 @@ where
             stack.insert(location as usize, Val::reg(caller_vmctx));
             stack.insert(location as usize, Val::reg(callee_vmctx));
             (
-                self.abi.sig(&sig, &CallingConvention::Default),
+                <M::ABI as ABI>::sig(&sig, &CallingConvention::Default),
                 Some(callee_addr),
             )
         } else {
-            (self.abi.sig(&callee.ty, &CallingConvention::Default), None)
+            (
+                <M::ABI as ABI>::sig(&callee.ty, &CallingConvention::Default),
+                None,
+            )
         };
 
-        let fncall = FnCall::new::<A, M>(&sig, &mut self.context, self.masm);
-        let alignment = self.abi.call_stack_align();
-        let addend = self.abi.arg_base_offset();
+        let fncall = FnCall::new::<M>(&sig, &mut self.context, self.masm);
         if let Some(addr) = callee_addr {
-            fncall.indirect::<M, A>(
-                self.masm,
-                &mut self.context,
-                addr,
-                alignment.into(),
-                addend.into(),
-            );
+            fncall.indirect::<M>(self.masm, &mut self.context, addr);
         } else {
-            fncall.direct::<M, A>(
-                self.masm,
-                &mut self.context,
-                index,
-                alignment.into(),
-                addend.into(),
-            );
+            fncall.direct::<M>(self.masm, &mut self.context, index);
         }
     }
diff --git a/winch/codegen/src/frame/mod.rs b/winch/codegen/src/frame/mod.rs
index dd40880afbec..17dbd1164fae 100644
--- a/winch/codegen/src/frame/mod.rs
+++ b/winch/codegen/src/frame/mod.rs
@@ -81,8 +81,8 @@ pub(crate) struct Frame {
 
 impl Frame {
     /// Allocate a new Frame.
-    pub fn new<A: ABI>(sig: &ABISig, defined_locals: &DefinedLocals, abi: &A) -> Result<Self> {
-        let (mut locals, defined_locals_start) = Self::compute_arg_slots(sig, abi)?;
+    pub fn new<A: ABI>(sig: &ABISig, defined_locals: &DefinedLocals) -> Result<Self> {
+        let (mut locals, defined_locals_start) = Self::compute_arg_slots::<A>(sig)?;
 
         // The defined locals have a zero-based offset by default
         // so we need to add the defined locals start to the offset.
@@ -96,7 +96,7 @@ impl Frame {
         let vmctx_slots_size = <A as ABI>::word_bytes();
         let vmctx_offset = defined_locals_start + defined_locals.stack_size + vmctx_slots_size;
 
-        let locals_size = align_to(vmctx_offset, abi.stack_align().into());
+        let locals_size = align_to(vmctx_offset, <A as ABI>::stack_align().into());
 
         Ok(Self {
             locals,
@@ -113,7 +113,7 @@ impl Frame {
         self.locals.get(index as usize)
     }
 
-    fn compute_arg_slots<A: ABI>(sig: &ABISig, abi: &A) -> Result<(Locals, u32)> {
+    fn compute_arg_slots<A: ABI>(sig: &ABISig) -> Result<(Locals, u32)> {
         // Go over the function ABI-signature and
         // calculate the stack slots.
         //
@@ -142,7 +142,7 @@ impl Frame {
         // we want positive addressing from the stack pointer
         // for both locals and stack arguments.
-        let arg_base_offset = abi.arg_base_offset().into();
+        let arg_base_offset = <A as ABI>::arg_base_offset().into();
         let mut next_stack = 0u32;
 
         let slots: Locals = sig
             .params
diff --git a/winch/codegen/src/isa/aarch64/abi.rs b/winch/codegen/src/isa/aarch64/abi.rs
index 2b3a335b7482..8479aa9b4c4b 100644
--- a/winch/codegen/src/isa/aarch64/abi.rs
+++ b/winch/codegen/src/isa/aarch64/abi.rs
@@ -43,15 +43,15 @@ impl RegIndexEnv {
 
 impl ABI for Aarch64ABI {
     // TODO change to 16 once SIMD is supported
-    fn stack_align(&self) -> u8 {
+    fn stack_align() -> u8 {
         8
     }
 
-    fn call_stack_align(&self) -> u8 {
+    fn call_stack_align() -> u8 {
         16
     }
 
-    fn arg_base_offset(&self) -> u8 {
+    fn arg_base_offset() -> u8 {
         16
     }
 
@@ -63,7 +63,7 @@ impl ABI for Aarch64ABI {
         64
     }
 
-    fn sig(&self, wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
+    fn sig(wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
         assert!(call_conv.is_apple_aarch64() || call_conv.is_default());
 
         if wasm_sig.results().len() > 1 {
@@ -162,8 +162,7 @@ mod tests {
     fn xreg_abi_sig() {
         let wasm_sig = FuncType::new([I32, I64, I32, I64, I32, I32, I64, I32, I64], []);
 
-        let abi = Aarch64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::Default);
+        let sig = Aarch64ABI::sig(&wasm_sig, &CallingConvention::Default);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), I32, regs::xreg(0));
@@ -181,8 +180,7 @@ mod tests {
     fn vreg_abi_sig() {
         let wasm_sig = FuncType::new([F32, F64, F32, F64, F32, F32, F64, F32, F64], []);
 
-        let abi = Aarch64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::Default);
+        let sig = Aarch64ABI::sig(&wasm_sig, &CallingConvention::Default);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), F32, regs::vreg(0));
@@ -200,8 +198,7 @@ mod tests {
     fn mixed_abi_sig() {
         let wasm_sig = FuncType::new([F32, I32, I64, F64, I32, F32, F64, F32, F64], []);
 
-        let abi = Aarch64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::Default);
+        let sig = Aarch64ABI::sig(&wasm_sig, &CallingConvention::Default);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), F32, regs::vreg(0));
diff --git a/winch/codegen/src/isa/aarch64/masm.rs b/winch/codegen/src/isa/aarch64/masm.rs
index 47df97d7b57d..c637b8598b3e 100644
--- a/winch/codegen/src/isa/aarch64/masm.rs
+++ b/winch/codegen/src/isa/aarch64/masm.rs
@@ -1,10 +1,11 @@
 use super::{
+    abi::Aarch64ABI,
     address::Address,
     asm::{Assembler, Operand},
     regs,
 };
 use crate::{
-    abi::local::LocalSlot,
+    abi::{self, local::LocalSlot},
     codegen::CodeGenContext,
     isa::reg::Reg,
     masm::{CalleeKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind},
@@ -54,6 +55,8 @@ impl MacroAssembler {
 
 impl Masm for MacroAssembler {
     type Address = Address;
+    type Ptr = u8;
+    type ABI = Aarch64ABI;
 
     fn prologue(&mut self) {
         let lr = regs::lr();
@@ -138,8 +141,6 @@ impl Masm for MacroAssembler {
 
     fn call(
         &mut self,
-        _alignment: u32,
-        _addend: u32,
         _stack_args_size: u32,
         _load_callee: impl FnMut(&mut Self) -> CalleeKind,
     ) -> u32 {
@@ -191,9 +192,7 @@ impl Masm for MacroAssembler {
     }
 
     fn push(&mut self, reg: Reg) -> u32 {
-        // The push is counted as pushing the 64-bit width in
-        // 64-bit architectures.
-        let size = 8u32;
+        let size = <Self::ABI as abi::ABI>::word_bytes();
         self.reserve_stack(size);
         let address = Address::from_shadow_sp(size as i64);
         self.asm.str(reg, address, OperandSize::S64);
diff --git a/winch/codegen/src/isa/aarch64/mod.rs b/winch/codegen/src/isa/aarch64/mod.rs
index 2c9808fc1fe6..64b02a27508f 100644
--- a/winch/codegen/src/isa/aarch64/mod.rs
+++ b/winch/codegen/src/isa/aarch64/mod.rs
@@ -92,16 +92,15 @@ impl TargetIsa for Aarch64 {
         let mut body = body.get_binary_reader();
         let mut masm = Aarch64Masm::new(self.shared_flags.clone());
         let stack = Stack::new();
-        let abi = abi::Aarch64ABI::default();
-        let abi_sig = abi.sig(sig, &CallingConvention::Default);
+        let abi_sig = abi::Aarch64ABI::sig(sig, &CallingConvention::Default);
 
         let defined_locals = DefinedLocals::new(&mut body, validator)?;
-        let frame = Frame::new(&abi_sig, &defined_locals, &abi)?;
+        let frame = Frame::new::<abi::Aarch64ABI>(&abi_sig, &defined_locals)?;
         // TODO: Add floating point bitmask
         let regalloc = RegAlloc::new(RegSet::new(ALL_GPR, 0), scratch());
         let codegen_context = CodeGenContext::new(regalloc, stack, &frame);
         let env = FuncEnv::new(self.pointer_bytes(), translation);
-        let mut codegen = CodeGen::new(&mut masm, &abi, codegen_context, env, abi_sig);
+        let mut codegen = CodeGen::new(&mut masm, codegen_context, env, abi_sig);
 
         codegen.emit(&mut body, validator)?;
         Ok(masm.finalize())
diff --git a/winch/codegen/src/isa/x64/abi.rs b/winch/codegen/src/isa/x64/abi.rs
index e02c264c241d..bc4ddd6e6a89 100644
--- a/winch/codegen/src/isa/x64/abi.rs
+++ b/winch/codegen/src/isa/x64/abi.rs
@@ -59,15 +59,15 @@ pub(crate) struct X64ABI;
 
 impl ABI for X64ABI {
     // TODO: change to 16 once SIMD is supported
-    fn stack_align(&self) -> u8 {
+    fn stack_align() -> u8 {
         8
     }
 
-    fn call_stack_align(&self) -> u8 {
+    fn call_stack_align() -> u8 {
         16
    }
 
-    fn arg_base_offset(&self) -> u8 {
+    fn arg_base_offset() -> u8 {
         // Two 8-byte slots, one for the return address and another
         // one for the frame pointer.
         // ┌──────────┬───────── Argument base
@@ -96,7 +96,7 @@ impl ABI for X64ABI {
         64
     }
 
-    fn sig(&self, wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
+    fn sig(wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
         assert!(call_conv.is_fastcall() || call_conv.is_systemv() || call_conv.is_default());
 
         if wasm_sig.results().len() > 1 {
@@ -252,8 +252,7 @@ mod tests {
     fn int_abi_sig() {
         let wasm_sig = FuncType::new([I32, I64, I32, I64, I32, I32, I64, I32], []);
 
-        let abi = X64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::Default);
+        let sig = X64ABI::sig(&wasm_sig, &CallingConvention::Default);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), I32, regs::rdi());
@@ -270,8 +269,7 @@ mod tests {
     fn float_abi_sig() {
         let wasm_sig = FuncType::new([F32, F64, F32, F64, F32, F32, F64, F32, F64], []);
 
-        let abi = X64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::Default);
+        let sig = X64ABI::sig(&wasm_sig, &CallingConvention::Default);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), F32, regs::xmm0());
@@ -289,8 +287,7 @@ mod tests {
     fn mixed_abi_sig() {
         let wasm_sig = FuncType::new([F32, I32, I64, F64, I32, F32, F64, F32, F64], []);
 
-        let abi = X64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::Default);
+        let sig = X64ABI::sig(&wasm_sig, &CallingConvention::Default);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), F32, regs::xmm0());
@@ -308,8 +305,7 @@ mod tests {
     fn system_v_call_conv() {
         let wasm_sig = FuncType::new([F32, I32, I64, F64, I32, F32, F64, F32, F64], []);
 
-        let abi = X64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::WasmtimeSystemV);
+        let sig = X64ABI::sig(&wasm_sig, &CallingConvention::WasmtimeSystemV);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), F32, regs::xmm0());
@@ -327,8 +323,7 @@ mod tests {
     fn fastcall_call_conv() {
         let wasm_sig = FuncType::new([F32, I32, I64, F64, I32, F32, F64, F32, F64], []);
 
-        let abi = X64ABI::default();
-        let sig = abi.sig(&wasm_sig, &CallingConvention::WasmtimeFastcall);
+        let sig = X64ABI::sig(&wasm_sig, &CallingConvention::WasmtimeFastcall);
         let params = sig.params;
 
         match_reg_arg(params.get(0).unwrap(), F32, regs::xmm0());
diff --git a/winch/codegen/src/isa/x64/masm.rs b/winch/codegen/src/isa/x64/masm.rs
index 66af86a15713..43ad329ed57e 100644
--- a/winch/codegen/src/isa/x64/masm.rs
+++ b/winch/codegen/src/isa/x64/masm.rs
@@ -1,11 +1,12 @@
 use super::{
+    abi::X64ABI,
     address::Address,
     asm::{Assembler, Operand},
     regs::{self, rbp, rsp},
 };
 use crate::masm::{DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind};
 use crate::{
-    abi::{align_to, calculate_frame_adjustment, LocalSlot},
+    abi::{self, align_to, calculate_frame_adjustment, LocalSlot},
     codegen::CodeGenContext,
     stack::Val,
 };
@@ -45,6 +46,8 @@ impl From<Address> for Operand {
 
 impl Masm for MacroAssembler {
     type Address = Address;
+    type Ptr = u8;
+    type ABI = X64ABI;
 
     fn prologue(&mut self) {
         let frame_pointer = rbp();
@@ -57,10 +60,7 @@ impl Masm for MacroAssembler {
 
     fn push(&mut self, reg: Reg) -> u32 {
         self.asm.push_r(reg);
-        // In x64 the push instruction takes either
-        // 2 or 8 bytes; in our case we're always
-        // assuming 8 bytes per push.
-        self.increment_sp(8);
+        self.increment_sp(<Self::ABI as abi::ABI>::word_bytes());
 
         self.sp_offset
     }
@@ -114,17 +114,16 @@ impl Masm for MacroAssembler {
 
     fn pop(&mut self, dst: Reg) {
         self.asm.pop_r(dst);
-        // Similar to the comment in `push`, we assume 8 bytes per pop.
-        self.decrement_sp(8);
+        self.decrement_sp(<Self::ABI as abi::ABI>::word_bytes());
     }
 
     fn call(
         &mut self,
-        alignment: u32,
-        addend: u32,
         stack_args_size: u32,
         mut load_callee: impl FnMut(&mut Self) -> CalleeKind,
     ) -> u32 {
+        let alignment: u32 = <Self::ABI as abi::ABI>::call_stack_align().into();
+        let addend: u32 = <Self::ABI as abi::ABI>::arg_base_offset().into();
         let delta = calculate_frame_adjustment(self.sp_offset(), addend, alignment);
         let aligned_args_size = align_to(stack_args_size, alignment);
         let total_stack = delta + aligned_args_size;
diff --git a/winch/codegen/src/isa/x64/mod.rs b/winch/codegen/src/isa/x64/mod.rs
index bf1de9777406..3ce61f1b9dce 100644
--- a/winch/codegen/src/isa/x64/mod.rs
+++ b/winch/codegen/src/isa/x64/mod.rs
@@ -96,16 +96,15 @@ impl TargetIsa for X64 {
         let mut body = body.get_binary_reader();
         let mut masm = X64Masm::new(self.shared_flags.clone(), self.isa_flags.clone());
         let stack = Stack::new();
-        let abi = abi::X64ABI::default();
-        let abi_sig = abi.sig(sig, &CallingConvention::Default);
+        let abi_sig = abi::X64ABI::sig(sig, &CallingConvention::Default);
 
         let defined_locals = DefinedLocals::new(&mut body, validator)?;
-        let frame = Frame::new(&abi_sig, &defined_locals, &abi)?;
+        let frame = Frame::new::<abi::X64ABI>(&abi_sig, &defined_locals)?;
         // TODO Add in floating point bitmask
         let regalloc = RegAlloc::new(RegSet::new(ALL_GPR, 0), regs::scratch());
         let codegen_context = CodeGenContext::new(regalloc, stack, &frame);
         let env = FuncEnv::new(self.pointer_bytes(), translation);
-        let mut codegen = CodeGen::new(&mut masm, &abi, codegen_context, env, abi_sig);
+        let mut codegen = CodeGen::new(&mut masm, codegen_context, env, abi_sig);
 
         codegen.emit(&mut body, validator)?;
 
@@ -128,13 +127,11 @@ impl TargetIsa for X64 {
     ) -> Result<MachBufferFinalized<Final>> {
         use TrampolineKind::*;
 
-        let abi = abi::X64ABI::default();
         let mut masm = X64Masm::new(self.shared_flags.clone(), self.isa_flags.clone());
         let call_conv = self.wasmtime_call_conv();
 
         let mut trampoline = Trampoline::new(
             &mut masm,
-            &abi,
             regs::scratch(),
             regs::argv(),
             &call_conv,
diff --git a/winch/codegen/src/masm.rs b/winch/codegen/src/masm.rs
index f713bf3fc8d6..ce17f898d4a8 100644
--- a/winch/codegen/src/masm.rs
+++ b/winch/codegen/src/masm.rs
@@ -1,9 +1,10 @@
-use crate::abi::{align_to, LocalSlot};
+use crate::abi::{self, align_to, LocalSlot};
 use crate::codegen::CodeGenContext;
 use crate::isa::reg::Reg;
 use crate::regalloc::RegAlloc;
 use cranelift_codegen::{Final, MachBufferFinalized};
 use std::{fmt::Debug, ops::Range};
+use wasmtime_environ::PtrSize;
 
 #[derive(Eq, PartialEq)]
 pub(crate) enum DivKind {
@@ -86,6 +87,13 @@ pub(crate) trait MacroAssembler {
     /// The addressing mode.
     type Address: Copy;
 
+    /// The pointer representation of the target ISA,
+    /// used to access information from [`VMOffsets`].
+    type Ptr: PtrSize;
+
+    /// The ABI details of the target.
+    type ABI: abi::ABI;
+
     /// Emit the function prologue.
     fn prologue(&mut self);
 
@@ -115,13 +123,7 @@ pub(crate) trait MacroAssembler {
     fn address_at_reg(&self, reg: Reg, offset: u32) -> Self::Address;
 
     /// Emit a function call to either a local or external function.
-    fn call(
-        &mut self,
-        alignment: u32,
-        addend: u32,
-        stack_args_size: u32,
-        f: impl FnMut(&mut Self) -> CalleeKind,
-    ) -> u32;
+    fn call(&mut self, stack_args_size: u32, f: impl FnMut(&mut Self) -> CalleeKind) -> u32;
 
     /// Get stack pointer offset.
     fn sp_offset(&self) -> u32;
@@ -177,7 +179,8 @@ pub(crate) trait MacroAssembler {
     /// The default implementation divides the given memory range
     /// into word-sized slots. Then it unrolls a series of store
     /// instructions, effectively assigning zero to each slot.
-    fn zero_mem_range(&mut self, mem: &Range<u32>, word_size: u32, regalloc: &mut RegAlloc) {
+    fn zero_mem_range(&mut self, mem: &Range<u32>, regalloc: &mut RegAlloc) {
+        let word_size = <Self::ABI as abi::ABI>::word_bytes();
         if mem.is_empty() {
             return;
         }
diff --git a/winch/codegen/src/trampoline.rs b/winch/codegen/src/trampoline.rs
index b310735b9c98..52cea8af8bfc 100644
--- a/winch/codegen/src/trampoline.rs
+++ b/winch/codegen/src/trampoline.rs
@@ -34,15 +34,12 @@ pub enum TrampolineKind {
 }
 
 /// The main trampoline abstraction.
-pub(crate) struct Trampoline<'a, A, M>
+pub(crate) struct Trampoline<'a, M>
 where
-    A: ABI,
     M: MacroAssembler,
 {
     /// The macro assembler.
     masm: &'a mut M,
-    /// The ABI.
-    abi: &'a A,
     /// The main scratch register for the current architecture. It is
     /// not allocatable for the callee.
     scratch_reg: Reg,
@@ -58,29 +55,26 @@ where
     /// calling convention.
     call_conv: &'a CallingConvention,
     /// The pointer size of the current ISA.
-    pointer_size: u8,
+    pointer_size: M::Ptr,
 }
 
-impl<'a, A, M> Trampoline<'a, A, M>
+impl<'a, M> Trampoline<'a, M>
 where
-    A: ABI,
     M: MacroAssembler,
 {
     /// Create a new trampoline.
     pub fn new(
         masm: &'a mut M,
-        abi: &'a A,
         scratch_reg: Reg,
         alloc_scratch_reg: Reg,
         call_conv: &'a CallingConvention,
-        pointer_size: u8,
+        pointer_size: M::Ptr,
     ) -> Self {
         Self {
             masm,
-            abi,
             scratch_reg,
             alloc_scratch_reg,
-            callee_saved_regs: <A as ABI>::callee_saved_regs(call_conv),
+            callee_saved_regs: <M::ABI as ABI>::callee_saved_regs(call_conv),
             call_conv,
             pointer_size,
         }
     }
@@ -109,7 +103,7 @@ where
 
         self.masm.mov(
             vmctx.into(),
-            <A as ABI>::vmctx_reg().into(),
+            <M::ABI as ABI>::vmctx_reg().into(),
             OperandSize::S64,
         );
 
@@ -119,34 +113,24 @@ where
         let val_ptr_offset = offsets[2];
 
         // Call the function that was passed into the trampoline.
-        let allocated_stack = self.masm.call(
-            self.abi.call_stack_align().into(),
-            self.abi.arg_base_offset().into(),
-            wasm_sig.stack_bytes,
-            |masm| {
-                // Save the SP when entering Wasm.
-                // TODO: Once Winch supports comparison operators,
-                // check that the caller VM context is what we expect.
-                // See [`wasmtime_environ::MAGIC`].
-                Self::save_last_wasm_entry_sp(
-                    masm,
-                    vmctx_runtime_limits_addr,
-                    self.scratch_reg,
-                    &self.pointer_size,
-                );
-
-                // Move the values register to the scratch
-                // register for argument assignment.
-                masm.mov(*val_ptr, self.scratch_reg.into(), OperandSize::S64);
-                Self::assign_args_from_array(
-                    masm,
-                    &wasm_sig,
-                    self.scratch_reg,
-                    self.alloc_scratch_reg,
-                );
-                CalleeKind::Direct(callee_index.as_u32())
-            },
-        );
+        let allocated_stack = self.masm.call(wasm_sig.stack_bytes, |masm| {
+            // Save the SP when entering Wasm.
+            // TODO: Once Winch supports comparison operators,
+            // check that the caller VM context is what we expect.
+            // See [`wasmtime_environ::MAGIC`].
+            Self::save_last_wasm_entry_sp(
+                masm,
+                vmctx_runtime_limits_addr,
+                self.scratch_reg,
+                &self.pointer_size,
+            );
+
+            // Move the values register to the scratch
+            // register for argument assignment.
+            masm.mov(*val_ptr, self.scratch_reg.into(), OperandSize::S64);
+            Self::assign_args_from_array(masm, &wasm_sig, self.scratch_reg, self.alloc_scratch_reg);
+            CalleeKind::Direct(callee_index.as_u32())
+        });
 
         self.masm.free_stack(allocated_stack);
 
@@ -183,39 +167,34 @@ where
         // Move the VM context pointer to the designated pinned register.
         self.masm.mov(
             vmctx.into(),
-            <A as ABI>::vmctx_reg().into(),
+            <M::ABI as ABI>::vmctx_reg().into(),
             OperandSize::S64,
         );
 
         let vmctx_runtime_limits_addr = self.vmctx_runtime_limits_addr(caller_vmctx);
         let (offsets, spill_size) = self.spill(&native_sig.params);
 
-        let reserved_stack = self.masm.call(
-            self.abi.call_stack_align().into(),
-            self.abi.arg_base_offset().into(),
-            wasm_sig.stack_bytes,
-            |masm| {
-                // Save the SP when entering Wasm.
-                // TODO: Once Winch supports comparison operators,
-                // check that the caller VM context is what we expect.
-                // See [`wasmtime_environ::MAGIC`].
-                Self::save_last_wasm_entry_sp(
-                    masm,
-                    vmctx_runtime_limits_addr,
-                    self.scratch_reg,
-                    &self.pointer_size,
-                );
-                Self::assign_args(
-                    masm,
-                    &wasm_sig.params,
-                    &native_sig.params[2..],
-                    &offsets[2..],
-                    self.scratch_reg,
-                    self.abi.arg_base_offset().into(),
-                );
-                CalleeKind::Direct(callee_index.as_u32())
-            },
-        );
+        let reserved_stack = self.masm.call(wasm_sig.stack_bytes, |masm| {
+            // Save the SP when entering Wasm.
+            // TODO: Once Winch supports comparison operators,
+            // check that the caller VM context is what we expect.
+            // See [`wasmtime_environ::MAGIC`].
+            Self::save_last_wasm_entry_sp(
+                masm,
+                vmctx_runtime_limits_addr,
+                self.scratch_reg,
+                &self.pointer_size,
+            );
+            Self::assign_args(
+                masm,
+                &wasm_sig.params,
+                &native_sig.params[2..],
+                &offsets[2..],
+                self.scratch_reg,
+                <M::ABI as ABI>::arg_base_offset().into(),
+            );
+            CalleeKind::Direct(callee_index.as_u32())
+        });
 
         self.masm.free_stack(reserved_stack);
         self.epilogue_with_callee_saved_restore(spill_size);
@@ -251,35 +230,30 @@ where
 
         let (offsets, spill_size) = self.spill(&wasm_sig.params);
 
-        let reserved_stack = self.masm.call(
-            self.abi.call_stack_align().into(),
-            self.abi.arg_base_offset().into(),
-            native_sig.stack_bytes,
-            |masm| {
-                // Move the VM context into one of the scratch registers.
-                masm.mov(
-                    vmctx.into(),
-                    self.alloc_scratch_reg.into(),
-                    OperandSize::S64,
-                );
-
-                Self::assign_args(
-                    masm,
-                    &native_sig.params,
-                    &wasm_sig.params,
-                    &offsets,
-                    self.scratch_reg,
-                    self.abi.arg_base_offset().into(),
-                );
-
-                let body_offset = self.pointer_size.vmnative_call_host_func_context_func_ref()
-                    + self.pointer_size.vm_func_ref_native_call();
-                let callee_addr = masm.address_at_reg(self.alloc_scratch_reg, body_offset.into());
-                masm.load(callee_addr, self.scratch_reg, OperandSize::S64);
-
-                CalleeKind::Indirect(self.scratch_reg)
-            },
-        );
+        let reserved_stack = self.masm.call(native_sig.stack_bytes, |masm| {
+            // Move the VM context into one of the scratch registers.
+            masm.mov(
+                vmctx.into(),
+                self.alloc_scratch_reg.into(),
+                OperandSize::S64,
+            );
+
+            Self::assign_args(
+                masm,
+                &native_sig.params,
+                &wasm_sig.params,
+                &offsets,
+                self.scratch_reg,
+                <M::ABI as ABI>::arg_base_offset().into(),
+            );
+
+            let body_offset = self.pointer_size.vmnative_call_host_func_context_func_ref()
+                + self.pointer_size.vm_func_ref_native_call();
+            let callee_addr = masm.address_at_reg(self.alloc_scratch_reg, body_offset.into());
+            masm.load(callee_addr, self.scratch_reg, OperandSize::S64);
+
+            CalleeKind::Indirect(self.scratch_reg)
+        });
 
         self.masm.free_stack(reserved_stack);
         self.epilogue(spill_size);
@@ -298,7 +272,7 @@ where
         arg_base_offset: u32,
     ) {
         assert!(callee_params.len() == caller_params.len());
-        let fp = <A as ABI>::fp_reg();
+        let fp = <M::ABI as ABI>::fp_reg();
         let mut offset_index = 0;
 
         callee_params
@@ -359,12 +333,12 @@ where
         params.extend_from_slice(ty.params());
         let native_type = FuncType::new(params, ty.results().to_owned());
 
-        self.abi.sig(&native_type, self.call_conv)
+        <M::ABI as ABI>::sig(&native_type, self.call_conv)
     }
 
     /// Returns a signature using the Winch's default calling convention.
     fn wasm_sig(&self, ty: &FuncType) -> ABISig {
-        self.abi.sig(ty, &CallingConvention::Default)
+        <M::ABI as ABI>::sig(ty, &CallingConvention::Default)
     }
 
     /// Returns the register pair containing the callee and caller VM context pointers.
@@ -404,7 +378,7 @@ where
         // The stack size for the spill, calculated
         // from the number of spilled register times
        // the size of each push (8 bytes).
-        let size = spilled * <A as ABI>::word_bytes();
+        let size = spilled * <M::ABI as ABI>::word_bytes();
 
         (offsets, size)
     }
@@ -445,7 +419,7 @@ where
         scratch: Reg,
         ptr: &impl PtrSize,
     ) {
-        let sp = <A as ABI>::sp_reg();
+        let sp = <M::ABI as ABI>::sp_reg();
         masm.load(vm_runtime_limits_addr, scratch, OperandSize::S64);
         let addr = masm.address_at_reg(scratch, ptr.vmruntime_limits_last_wasm_entry_sp().into());
         masm.store(sp.into(), addr, OperandSize::S64);
@@ -469,13 +443,13 @@ where
         );
 
         // Handle the frame pointer.
-        let fp = <A as ABI>::fp_reg();
+        let fp = <M::ABI as ABI>::fp_reg();
         let fp_addr = masm.address_at_reg(fp, 0);
         masm.load(fp_addr, scratch, OperandSize::S64);
         masm.store(scratch.into(), last_wasm_exit_fp_addr, OperandSize::S64);
 
         // Handle the return address.
-        let ret_addr_offset = <A as ABI>::ret_addr_offset();
+        let ret_addr_offset = <M::ABI as ABI>::ret_addr_offset();
         let ret_addr = masm.address_at_reg(fp, ret_addr_offset.into());
         masm.load(ret_addr, scratch, OperandSize::S64);
         masm.store(scratch.into(), last_wasm_exit_pc_addr, OperandSize::S64);
diff --git a/winch/codegen/src/visitor.rs b/winch/codegen/src/visitor.rs
index 7c11f91bdff9..c686f4ed5fb2 100644
--- a/winch/codegen/src/visitor.rs
+++ b/winch/codegen/src/visitor.rs
@@ -4,13 +4,12 @@
 //! which validates and dispatches to the corresponding
 //! machine code emitter.
 
-use crate::abi::ABI;
 use crate::codegen::CodeGen;
 use crate::masm::{DivKind, MacroAssembler, OperandSize, RegImm, RemKind};
 use crate::stack::Val;
 use wasmparser::ValType;
 use wasmparser::VisitOperator;
-use wasmtime_environ::{FuncIndex, PtrSize};
+use wasmtime_environ::FuncIndex;
 
 /// A macro to define unsupported WebAssembly operators.
 ///
@@ -58,11 +57,9 @@ macro_rules! def_unsupported {
     (emit $unsupported:tt $($rest:tt)*) => {$($rest)*};
 }
 
-impl<'a, A, M, P> VisitOperator<'a> for CodeGen<'a, A, M, P>
+impl<'a, M> VisitOperator<'a> for CodeGen<'a, M>
 where
     M: MacroAssembler,
-    A: ABI,
-    P: PtrSize,
 {
     type Output = ();