From 8c9bbb55e10f9c886529a7c586a7982d17b8d943 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sa=C3=BAl=20Cabrera?=
Date: Wed, 24 May 2023 18:17:00 -0400
Subject: [PATCH] winch: Refactor the Masm associated types

This commit is a follow-up to
https://github.com/bytecodealliance/wasmtime/pull/6443, in which we
discussed potentially having `PtrSize` and `ABI` as associated types of
the `MacroAssembler` trait.

I considered making `PtrSize` an associated type of the `ABI` instead,
but given the amount of ABI detail needed at the `MacroAssembler`
level, I decided to go with the approach in this change.

The chosen approach ended up cutting a decent amount of boilerplate,
not only from the `MacroAssembler` itself, but also from each of the
touchpoints where the `MacroAssembler` is used.

This change also standardizes the signatures of the `ABI` trait: some
of them borrowed `&self` and some didn't, but in practice there's no
need for any of them to borrow `&self`.

(A minimal sketch of the resulting trait shape follows the diff.)
---
 winch/codegen/src/abi/mod.rs          |  9 +++---
 winch/codegen/src/codegen/call.rs     | 28 ++++++++---------
 winch/codegen/src/codegen/mod.rs      | 41 +++++++++----------------
 winch/codegen/src/frame/mod.rs        | 10 +++---
 winch/codegen/src/isa/aarch64/abi.rs  |  8 ++---
 winch/codegen/src/isa/aarch64/masm.rs | 11 +++----
 winch/codegen/src/isa/aarch64/mod.rs  |  7 ++---
 winch/codegen/src/isa/x64/abi.rs      |  8 ++---
 winch/codegen/src/isa/x64/masm.rs     | 17 +++++------
 winch/codegen/src/isa/x64/mod.rs      |  9 ++----
 winch/codegen/src/masm.rs             | 16 +++++++---
 winch/codegen/src/trampoline.rs       | 44 ++++++++++-----------------
 winch/codegen/src/visitor.rs          |  7 ++---
 13 files changed, 93 insertions(+), 122 deletions(-)

diff --git a/winch/codegen/src/abi/mod.rs b/winch/codegen/src/abi/mod.rs
index 79e5229198f4..165122c5d46f 100644
--- a/winch/codegen/src/abi/mod.rs
+++ b/winch/codegen/src/abi/mod.rs
@@ -55,21 +55,22 @@ pub(crate) use local::*;
 /// information about alignment, parameter passing, usage of
 /// specific registers, etc.
 pub(crate) trait ABI {
-    fn stack_align(&self) -> u8;
+    /// The required stack alignment.
+    fn stack_align() -> u8;
 
     /// The required stack alignment for calls.
-    fn call_stack_align(&self) -> u8;
+    fn call_stack_align() -> u8;
 
     /// The offset to the argument base, relative to the frame pointer.
-    fn arg_base_offset(&self) -> u8;
+    fn arg_base_offset() -> u8;
 
     /// The offset to the return address, relative to the frame pointer.
     fn ret_addr_offset() -> u8;
 
     /// Construct the ABI-specific signature from a WebAssembly
     /// function type.
-    fn sig(&self, wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig;
+    fn sig(wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig;
 
     /// Returns the number of bits in a word.
     fn word_bits() -> u32;
diff --git a/winch/codegen/src/codegen/call.rs b/winch/codegen/src/codegen/call.rs
index c1eae0b65545..406146b6392d 100644
--- a/winch/codegen/src/codegen/call.rs
+++ b/winch/codegen/src/codegen/call.rs
@@ -84,7 +84,7 @@ impl<'a> FnCall<'a> {
     /// want to calculate any adjustments to the caller's frame, after
     /// having saved any live registers, so that we can account for
     /// any pushes generated by register spilling.
-    pub fn new<A: ABI, M: MacroAssembler>(
+    pub fn new<M: MacroAssembler>(
         callee_sig: &'a ABISig,
         context: &mut CodeGenContext,
         masm: &mut M,
@@ -135,47 +135,43 @@ impl<'a> FnCall<'a> {
         Self {
             abi_sig: &callee_sig,
             arg_stack_space,
-            call_stack_space: (spilled_regs * <A as ABI>::word_bytes())
-                + (memory_values * <A as ABI>::word_bytes()),
+            call_stack_space: (spilled_regs * <M::ABI as ABI>::word_bytes())
+                + (memory_values * <M::ABI as ABI>::word_bytes()),
             sp_offset_at_callsite,
         }
     }
 
     /// Emit a direct function call, to a locally defined function.
-    pub fn direct<M: MacroAssembler, A: ABI>(
+    pub fn direct<M: MacroAssembler>(
         &self,
         masm: &mut M,
         context: &mut CodeGenContext,
         callee: FuncIndex,
-        alignment: u32,
-        addend: u32,
     ) {
-        let reserved_stack = masm.call(alignment, addend, self.arg_stack_space, |masm| {
-            self.assign_args(context, masm, <A as ABI>::scratch_reg());
+        let reserved_stack = masm.call(self.arg_stack_space, |masm| {
+            self.assign_args(context, masm, <M::ABI as ABI>::scratch_reg());
             CalleeKind::Direct(callee.as_u32())
         });
-        self.post_call::<M, A>(masm, context, reserved_stack);
+        self.post_call::<M>(masm, context, reserved_stack);
     }
 
     /// Emit an indirect function call, using a raw address.
-    pub fn indirect<M: MacroAssembler, A: ABI>(
+    pub fn indirect<M: MacroAssembler>(
         &self,
         masm: &mut M,
         context: &mut CodeGenContext,
         addr: M::Address,
-        alignment: u32,
-        addend: u32,
     ) {
-        let reserved_stack = masm.call(alignment, addend, self.arg_stack_space, |masm| {
-            let scratch = <A as ABI>::scratch_reg();
+        let reserved_stack = masm.call(self.arg_stack_space, |masm| {
+            let scratch = <M::ABI as ABI>::scratch_reg();
             self.assign_args(context, masm, scratch);
             masm.load(addr, scratch, OperandSize::S64);
             CalleeKind::Indirect(scratch)
         });
-        self.post_call::<M, A>(masm, context, reserved_stack);
+        self.post_call::<M>(masm, context, reserved_stack);
     }
 
-    fn post_call<M: MacroAssembler, A: ABI>(
+    fn post_call<M: MacroAssembler>(
         &self,
         masm: &mut M,
         context: &mut CodeGenContext,
diff --git a/winch/codegen/src/codegen/mod.rs b/winch/codegen/src/codegen/mod.rs
index 9cb32946b71c..024199050219 100644
--- a/winch/codegen/src/codegen/mod.rs
+++ b/winch/codegen/src/codegen/mod.rs
@@ -9,7 +9,7 @@ use call::FnCall;
 use wasmparser::{
     BinaryReader, FuncType, FuncValidator, ValType, ValidatorResources, VisitOperator,
 };
-use wasmtime_environ::{FuncIndex, PtrSize};
+use wasmtime_environ::FuncIndex;
 
 mod context;
 pub(crate) use context::*;
@@ -18,11 +18,9 @@ pub use env::*;
 pub mod call;
 
 /// The code generation abstraction.
-pub(crate) struct CodeGen<'a, A, M, P>
+pub(crate) struct CodeGen<'a, M>
 where
     M: MacroAssembler,
-    A: ABI,
-    P: PtrSize,
 {
     /// The ABI-specific representation of the function signature, excluding results.
     sig: ABISig,
@@ -31,33 +29,29 @@ where
     pub context: CodeGenContext<'a>,
 
     /// A reference to the function compilation environment.
-    pub env: FuncEnv<'a, P>,
+    pub env: FuncEnv<'a, M::Ptr>,
 
     /// The MacroAssembler.
     pub masm: &'a mut M,
 
-    /// A reference to the current ABI.
-    pub abi: &'a A,
+    // A reference to the current ABI.
+    // pub abi: &'a M::ABI,
 }
 
-impl<'a, A, M, P> CodeGen<'a, A, M, P>
+impl<'a, M> CodeGen<'a, M>
 where
     M: MacroAssembler,
-    A: ABI,
-    P: PtrSize,
 {
     pub fn new(
         masm: &'a mut M,
-        abi: &'a A,
         context: CodeGenContext<'a>,
-        env: FuncEnv<'a, P>,
+        env: FuncEnv<'a, M::Ptr>,
         sig: ABISig,
     ) -> Self {
         Self {
             sig,
             context,
             masm,
-            abi,
             env,
         }
     }
@@ -91,7 +85,6 @@ where
         let defined_locals_range = &self.context.frame.defined_locals_range;
         self.masm.zero_mem_range(
             defined_locals_range.as_range(),
-            <A as ABI>::word_bytes(),
             &mut self.context.regalloc,
         );
 
@@ -99,7 +92,7 @@ where
         // at any point.
         let vmctx_addr = self.masm.local_address(&self.context.frame.vmctx_slot);
         self.masm
-            .store(<A as ABI>::vmctx_reg().into(), vmctx_addr, OperandSize::S64);
+            .store(<M::ABI as ABI>::vmctx_reg().into(), vmctx_addr, OperandSize::S64);
 
         while !body.eof() {
             let offset = body.original_position();
@@ -141,7 +134,7 @@ where
             params.extend_from_slice(&callee.ty.params());
             let sig = FuncType::new(params, callee.ty.results().to_owned());
 
-            let caller_vmctx = <A as ABI>::vmctx_reg();
+            let caller_vmctx = <M::ABI as ABI>::vmctx_reg();
             let callee_vmctx = self.context.any_gpr(self.masm);
             let callee_vmctx_offset = self.env.vmoffsets.vmctx_vmfunction_import_vmctx(index);
             let callee_vmctx_addr = self.masm.address_at_reg(caller_vmctx, callee_vmctx_offset);
@@ -161,31 +154,25 @@ where
             stack.insert(location as usize, Val::reg(caller_vmctx));
             stack.insert(location as usize, Val::reg(callee_vmctx));
             (
-                self.abi.sig(&sig, &CallingConvention::Default),
+                <M::ABI as ABI>::sig(&sig, &CallingConvention::Default),
                 Some(callee_addr),
             )
         } else {
-            (self.abi.sig(&callee.ty, &CallingConvention::Default), None)
+            (<M::ABI as ABI>::sig(&callee.ty, &CallingConvention::Default), None)
         };
 
-        let fncall = FnCall::new::<A, M>(&sig, &mut self.context, self.masm);
-        let alignment = self.abi.call_stack_align();
-        let addend = self.abi.arg_base_offset();
+        let fncall = FnCall::new::<M>(&sig, &mut self.context, self.masm);
 
         if let Some(addr) = callee_addr {
-            fncall.indirect::<M, A>(
+            fncall.indirect::<M>(
                 self.masm,
                 &mut self.context,
                 addr,
-                alignment.into(),
-                addend.into(),
             );
         } else {
-            fncall.direct::<M, A>(
+            fncall.direct::<M>(
                 self.masm,
                 &mut self.context,
                 index,
-                alignment.into(),
-                addend.into(),
             );
         }
     }
diff --git a/winch/codegen/src/frame/mod.rs b/winch/codegen/src/frame/mod.rs
index dd40880afbec..17dbd1164fae 100644
--- a/winch/codegen/src/frame/mod.rs
+++ b/winch/codegen/src/frame/mod.rs
@@ -81,8 +81,8 @@ pub(crate) struct Frame {
 
 impl Frame {
     /// Allocate a new Frame.
-    pub fn new<A: ABI>(sig: &ABISig, defined_locals: &DefinedLocals, abi: &A) -> Result<Self> {
-        let (mut locals, defined_locals_start) = Self::compute_arg_slots(sig, abi)?;
+    pub fn new<A: ABI>(sig: &ABISig, defined_locals: &DefinedLocals) -> Result<Self> {
+        let (mut locals, defined_locals_start) = Self::compute_arg_slots::<A>(sig)?;
 
         // The defined locals have a zero-based offset by default
         // so we need to add the defined locals start to the offset.
@@ -96,7 +96,7 @@ impl Frame {
         let vmctx_slots_size = <A as ABI>::word_bytes();
         let vmctx_offset = defined_locals_start + defined_locals.stack_size + vmctx_slots_size;
 
-        let locals_size = align_to(vmctx_offset, abi.stack_align().into());
+        let locals_size = align_to(vmctx_offset, <A as ABI>::stack_align().into());
 
         Ok(Self {
             locals,
@@ -113,7 +113,7 @@ impl Frame {
         self.locals.get(index as usize)
     }
 
-    fn compute_arg_slots<A: ABI>(sig: &ABISig, abi: &A) -> Result<(Locals, u32)> {
+    fn compute_arg_slots<A: ABI>(sig: &ABISig) -> Result<(Locals, u32)> {
         // Go over the function ABI-signature and
         // calculate the stack slots.
         //
@@ -142,7 +142,7 @@ impl Frame {
         // we want positive addressing from the stack pointer
         // for both locals and stack arguments.
-        let arg_base_offset = abi.arg_base_offset().into();
+        let arg_base_offset = <A as ABI>::arg_base_offset().into();
         let mut next_stack = 0u32;
         let slots: Locals = sig
             .params
diff --git a/winch/codegen/src/isa/aarch64/abi.rs b/winch/codegen/src/isa/aarch64/abi.rs
index 2b3a335b7482..5fd89967edbd 100644
--- a/winch/codegen/src/isa/aarch64/abi.rs
+++ b/winch/codegen/src/isa/aarch64/abi.rs
@@ -43,15 +43,15 @@ impl RegIndexEnv {
 
 impl ABI for Aarch64ABI {
     // TODO change to 16 once SIMD is supported
-    fn stack_align(&self) -> u8 {
+    fn stack_align() -> u8 {
         8
     }
 
-    fn call_stack_align(&self) -> u8 {
+    fn call_stack_align() -> u8 {
         16
     }
 
-    fn arg_base_offset(&self) -> u8 {
+    fn arg_base_offset() -> u8 {
         16
     }
 
@@ -63,7 +63,7 @@ impl ABI for Aarch64ABI {
         64
     }
 
-    fn sig(&self, wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
+    fn sig(wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
         assert!(call_conv.is_apple_aarch64() || call_conv.is_default());
 
         if wasm_sig.results().len() > 1 {
diff --git a/winch/codegen/src/isa/aarch64/masm.rs b/winch/codegen/src/isa/aarch64/masm.rs
index 47df97d7b57d..3007e4501f16 100644
--- a/winch/codegen/src/isa/aarch64/masm.rs
+++ b/winch/codegen/src/isa/aarch64/masm.rs
@@ -2,9 +2,10 @@ use super::{
     address::Address,
     asm::{Assembler, Operand},
     regs,
+    abi::Aarch64ABI,
 };
 use crate::{
-    abi::local::LocalSlot,
+    abi::{local::LocalSlot, self},
     codegen::CodeGenContext,
     isa::reg::Reg,
     masm::{CalleeKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind},
@@ -54,6 +55,8 @@ impl MacroAssembler {
 
 impl Masm for MacroAssembler {
     type Address = Address;
+    type Ptr = u8;
+    type ABI = Aarch64ABI;
 
     fn prologue(&mut self) {
         let lr = regs::lr();
@@ -138,8 +141,6 @@ impl Masm for MacroAssembler {
 
     fn call(
         &mut self,
-        _alignment: u32,
-        _addend: u32,
         _stack_args_size: u32,
         _load_callee: impl FnMut(&mut Self) -> CalleeKind,
     ) -> u32 {
@@ -191,9 +192,7 @@ impl Masm for MacroAssembler {
     }
 
     fn push(&mut self, reg: Reg) -> u32 {
-        // The push is counted as pushing the 64-bit width in
-        // 64-bit architectures.
-        let size = 8u32;
+        let size = <Self::ABI as abi::ABI>::word_bytes();
         self.reserve_stack(size);
         let address = Address::from_shadow_sp(size as i64);
         self.asm.str(reg, address, OperandSize::S64);
diff --git a/winch/codegen/src/isa/aarch64/mod.rs b/winch/codegen/src/isa/aarch64/mod.rs
index 2c9808fc1fe6..64b02a27508f 100644
--- a/winch/codegen/src/isa/aarch64/mod.rs
+++ b/winch/codegen/src/isa/aarch64/mod.rs
@@ -92,16 +92,15 @@ impl TargetIsa for Aarch64 {
         let mut body = body.get_binary_reader();
         let mut masm = Aarch64Masm::new(self.shared_flags.clone());
         let stack = Stack::new();
-        let abi = abi::Aarch64ABI::default();
-        let abi_sig = abi.sig(sig, &CallingConvention::Default);
+        let abi_sig = abi::Aarch64ABI::sig(sig, &CallingConvention::Default);
         let defined_locals = DefinedLocals::new(&mut body, validator)?;
-        let frame = Frame::new(&abi_sig, &defined_locals, &abi)?;
+        let frame = Frame::new::<abi::Aarch64ABI>(&abi_sig, &defined_locals)?;
         // TODO: Add floating point bitmask
         let regalloc = RegAlloc::new(RegSet::new(ALL_GPR, 0), scratch());
         let codegen_context = CodeGenContext::new(regalloc, stack, &frame);
         let env = FuncEnv::new(self.pointer_bytes(), translation);
-        let mut codegen = CodeGen::new(&mut masm, &abi, codegen_context, env, abi_sig);
+        let mut codegen = CodeGen::new(&mut masm, codegen_context, env, abi_sig);
 
         codegen.emit(&mut body, validator)?;
         Ok(masm.finalize())
diff --git a/winch/codegen/src/isa/x64/abi.rs b/winch/codegen/src/isa/x64/abi.rs
index e02c264c241d..ceda96cb3a0c 100644
--- a/winch/codegen/src/isa/x64/abi.rs
+++ b/winch/codegen/src/isa/x64/abi.rs
@@ -59,15 +59,15 @@ pub(crate) struct X64ABI;
 
 impl ABI for X64ABI {
     // TODO: change to 16 once SIMD is supported
-    fn stack_align(&self) -> u8 {
+    fn stack_align() -> u8 {
         8
     }
 
-    fn call_stack_align(&self) -> u8 {
+    fn call_stack_align() -> u8 {
         16
     }
 
-    fn arg_base_offset(&self) -> u8 {
+    fn arg_base_offset() -> u8 {
         // Two 8-byte slots, one for the return address and another
         // one for the frame pointer.
         // ┌──────────┬───────── Argument base
@@ -96,7 +96,7 @@ impl ABI for X64ABI {
         64
     }
 
-    fn sig(&self, wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
+    fn sig(wasm_sig: &FuncType, call_conv: &CallingConvention) -> ABISig {
         assert!(call_conv.is_fastcall() || call_conv.is_systemv() || call_conv.is_default());
 
         if wasm_sig.results().len() > 1 {
diff --git a/winch/codegen/src/isa/x64/masm.rs b/winch/codegen/src/isa/x64/masm.rs
index 66af86a15713..39b764ccb4c9 100644
--- a/winch/codegen/src/isa/x64/masm.rs
+++ b/winch/codegen/src/isa/x64/masm.rs
@@ -2,10 +2,11 @@ use super::{
     address::Address,
     asm::{Assembler, Operand},
     regs::{self, rbp, rsp},
+    abi::X64ABI,
 };
 use crate::masm::{DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind};
 use crate::{
-    abi::{align_to, calculate_frame_adjustment, LocalSlot},
+    abi::{align_to, calculate_frame_adjustment, LocalSlot, self},
     codegen::CodeGenContext,
     stack::Val,
 };
@@ -45,6 +46,8 @@ impl From<Reg> for Operand {
 
 impl Masm for MacroAssembler {
     type Address = Address;
+    type Ptr = u8;
+    type ABI = X64ABI;
 
     fn prologue(&mut self) {
         let frame_pointer = rbp();
@@ -57,10 +60,7 @@ impl Masm for MacroAssembler {
 
     fn push(&mut self, reg: Reg) -> u32 {
         self.asm.push_r(reg);
-        // In x64 the push instruction takes either
-        // 2 or 8 bytes; in our case we're always
-        // assuming 8 bytes per push.
-        self.increment_sp(8);
+        self.increment_sp(<Self::ABI as abi::ABI>::word_bytes());
 
         self.sp_offset
     }
@@ -114,17 +114,16 @@ impl Masm for MacroAssembler {
 
     fn pop(&mut self, dst: Reg) {
         self.asm.pop_r(dst);
-        // Similar to the comment in `push`, we assume 8 bytes per pop.
-        self.decrement_sp(8);
+        self.decrement_sp(<Self::ABI as abi::ABI>::word_bytes());
     }
 
     fn call(
         &mut self,
-        alignment: u32,
-        addend: u32,
         stack_args_size: u32,
         mut load_callee: impl FnMut(&mut Self) -> CalleeKind,
     ) -> u32 {
+        let alignment: u32 = <Self::ABI as abi::ABI>::call_stack_align().into();
+        let addend: u32 = <Self::ABI as abi::ABI>::arg_base_offset().into();
         let delta = calculate_frame_adjustment(self.sp_offset(), addend, alignment);
         let aligned_args_size = align_to(stack_args_size, alignment);
         let total_stack = delta + aligned_args_size;
diff --git a/winch/codegen/src/isa/x64/mod.rs b/winch/codegen/src/isa/x64/mod.rs
index bf1de9777406..3ce61f1b9dce 100644
--- a/winch/codegen/src/isa/x64/mod.rs
+++ b/winch/codegen/src/isa/x64/mod.rs
@@ -96,16 +96,15 @@ impl TargetIsa for X64 {
         let mut body = body.get_binary_reader();
         let mut masm = X64Masm::new(self.shared_flags.clone(), self.isa_flags.clone());
         let stack = Stack::new();
-        let abi = abi::X64ABI::default();
-        let abi_sig = abi.sig(sig, &CallingConvention::Default);
+        let abi_sig = abi::X64ABI::sig(sig, &CallingConvention::Default);
         let defined_locals = DefinedLocals::new(&mut body, validator)?;
-        let frame = Frame::new(&abi_sig, &defined_locals, &abi)?;
+        let frame = Frame::new::<abi::X64ABI>(&abi_sig, &defined_locals)?;
         // TODO Add in floating point bitmask
         let regalloc = RegAlloc::new(RegSet::new(ALL_GPR, 0), regs::scratch());
         let codegen_context = CodeGenContext::new(regalloc, stack, &frame);
         let env = FuncEnv::new(self.pointer_bytes(), translation);
-        let mut codegen = CodeGen::new(&mut masm, &abi, codegen_context, env, abi_sig);
+        let mut codegen = CodeGen::new(&mut masm, codegen_context, env, abi_sig);
 
         codegen.emit(&mut body, validator)?;
 
@@ -128,13 +127,11 @@ impl TargetIsa for X64 {
     ) -> Result<MachBufferFinalized<Final>> {
         use TrampolineKind::*;
 
-        let abi = abi::X64ABI::default();
         let mut masm = X64Masm::new(self.shared_flags.clone(), self.isa_flags.clone());
         let call_conv = self.wasmtime_call_conv();
 
         let mut trampoline = Trampoline::new(
             &mut masm,
-            &abi,
             regs::scratch(),
             regs::argv(),
             &call_conv,
diff --git a/winch/codegen/src/masm.rs b/winch/codegen/src/masm.rs
index f713bf3fc8d6..39b47006381c 100644
--- a/winch/codegen/src/masm.rs
+++ b/winch/codegen/src/masm.rs
@@ -1,9 +1,11 @@
-use crate::abi::{align_to, LocalSlot};
+use crate::abi::{align_to, LocalSlot, self};
 use crate::codegen::CodeGenContext;
 use crate::isa::reg::Reg;
 use crate::regalloc::RegAlloc;
 use cranelift_codegen::{Final, MachBufferFinalized};
 use std::{fmt::Debug, ops::Range};
+use wasmtime_environ::PtrSize;
+
 
 #[derive(Eq, PartialEq)]
 pub(crate) enum DivKind {
@@ -86,6 +88,13 @@ pub(crate) trait MacroAssembler {
     /// The addressing mode.
     type Address: Copy;
 
+    /// The pointer representation of the target ISA,
+    /// used to access information from [`VMOffsets`].
+    type Ptr: PtrSize;
+
+    /// The ABI details of the target.
+    type ABI: abi::ABI;
+
     /// Emit the function prologue.
     fn prologue(&mut self);
 
@@ -117,8 +126,6 @@ pub(crate) trait MacroAssembler {
     /// Emit a function call to either a local or external function.
     fn call(
         &mut self,
-        alignment: u32,
-        addend: u32,
         stack_args_size: u32,
         f: impl FnMut(&mut Self) -> CalleeKind,
     ) -> u32;
@@ -177,7 +184,8 @@ pub(crate) trait MacroAssembler {
     /// The default implementation divides the given memory range
     /// into word-sized slots. Then it unrolls a series of store
     /// instructions, effectively assigning zero to each slot.
-    fn zero_mem_range(&mut self, mem: &Range<u32>, word_size: u32, regalloc: &mut RegAlloc) {
+    fn zero_mem_range(&mut self, mem: &Range<u32>, regalloc: &mut RegAlloc) {
+        let word_size = <Self::ABI as abi::ABI>::word_bytes();
         if mem.is_empty() {
             return;
         }
diff --git a/winch/codegen/src/trampoline.rs b/winch/codegen/src/trampoline.rs
index b310735b9c98..f8ab257a34f4 100644
--- a/winch/codegen/src/trampoline.rs
+++ b/winch/codegen/src/trampoline.rs
@@ -34,15 +34,12 @@ pub enum TrampolineKind {
 }
 
 /// The main trampoline abstraction.
-pub(crate) struct Trampoline<'a, A, M>
+pub(crate) struct Trampoline<'a, M>
 where
-    A: ABI,
     M: MacroAssembler,
 {
     /// The macro assembler.
     masm: &'a mut M,
-    /// The ABI.
-    abi: &'a A,
     /// The main scratch register for the current architecture. It is
     /// not allocatable for the callee.
     scratch_reg: Reg,
@@ -58,29 +55,26 @@ where
     /// calling convention.
     call_conv: &'a CallingConvention,
     /// The pointer size of the current ISA.
-    pointer_size: u8,
+    pointer_size: M::Ptr,
 }
 
-impl<'a, A, M> Trampoline<'a, A, M>
+impl<'a, M> Trampoline<'a, M>
 where
-    A: ABI,
     M: MacroAssembler,
 {
     /// Create a new trampoline.
     pub fn new(
         masm: &'a mut M,
-        abi: &'a A,
         scratch_reg: Reg,
         alloc_scratch_reg: Reg,
         call_conv: &'a CallingConvention,
-        pointer_size: u8,
+        pointer_size: M::Ptr,
     ) -> Self {
         Self {
             masm,
-            abi,
             scratch_reg,
             alloc_scratch_reg,
-            callee_saved_regs: <A as ABI>::callee_saved_regs(call_conv),
+            callee_saved_regs: <M::ABI as ABI>::callee_saved_regs(call_conv),
             call_conv,
             pointer_size,
         }
     }
@@ -109,7 +103,7 @@ where
 
         self.masm.mov(
             vmctx.into(),
-            <A as ABI>::vmctx_reg().into(),
+            <M::ABI as ABI>::vmctx_reg().into(),
             OperandSize::S64,
         );
 
@@ -120,8 +114,6 @@ where
 
         // Call the function that was passed into the trampoline.
         let allocated_stack = self.masm.call(
-            self.abi.call_stack_align().into(),
-            self.abi.arg_base_offset().into(),
             wasm_sig.stack_bytes,
             |masm| {
                 // Save the SP when entering Wasm.
@@ -183,7 +175,7 @@ where
         // Move the VM context pointer to the designated pinned register.
         self.masm.mov(
             vmctx.into(),
-            <A as ABI>::vmctx_reg().into(),
+            <M::ABI as ABI>::vmctx_reg().into(),
             OperandSize::S64,
         );
 
@@ -191,8 +183,6 @@ where
         let (offsets, spill_size) = self.spill(&native_sig.params);
 
         let reserved_stack = self.masm.call(
-            self.abi.call_stack_align().into(),
-            self.abi.arg_base_offset().into(),
             wasm_sig.stack_bytes,
             |masm| {
                 // Save the SP when entering Wasm.
@@ -211,7 +201,7 @@ where
                     &native_sig.params[2..],
                     &offsets[2..],
                     self.scratch_reg,
-                    self.abi.arg_base_offset().into(),
+                    <M::ABI as ABI>::arg_base_offset().into(),
                 );
                 CalleeKind::Direct(callee_index.as_u32())
             },
@@ -252,8 +242,6 @@ where
         let (offsets, spill_size) = self.spill(&wasm_sig.params);
 
         let reserved_stack = self.masm.call(
-            self.abi.call_stack_align().into(),
-            self.abi.arg_base_offset().into(),
             native_sig.stack_bytes,
             |masm| {
                 // Move the VM context into one of the scratch registers.
@@ -269,7 +257,7 @@ where
                     &wasm_sig.params,
                     &offsets,
                     self.scratch_reg,
-                    self.abi.arg_base_offset().into(),
+                    <M::ABI as ABI>::arg_base_offset().into(),
                 );
 
                 let body_offset = self.pointer_size.vmnative_call_host_func_context_func_ref()
@@ -298,7 +286,7 @@ where
         arg_base_offset: u32,
     ) {
         assert!(callee_params.len() == caller_params.len());
-        let fp = <A as ABI>::fp_reg();
+        let fp = <M::ABI as ABI>::fp_reg();
         let mut offset_index = 0;
 
         callee_params
@@ -359,12 +347,12 @@ where
             params.extend_from_slice(ty.params());
 
         let native_type = FuncType::new(params, ty.results().to_owned());
-        self.abi.sig(&native_type, self.call_conv)
+        <M::ABI as ABI>::sig(&native_type, self.call_conv)
     }
 
     /// Returns a signature using the Winch's default calling convention.
     fn wasm_sig(&self, ty: &FuncType) -> ABISig {
-        self.abi.sig(ty, &CallingConvention::Default)
+        <M::ABI as ABI>::sig(ty, &CallingConvention::Default)
     }
 
     /// Returns the register pair containing the callee and caller VM context pointers.
@@ -404,7 +392,7 @@ where
         // The stack size for the spill, calculated
        // from the number of spilled register times
        // the size of each push (8 bytes).
-        let size = spilled * <A as ABI>::word_bytes();
+        let size = spilled * <M::ABI as ABI>::word_bytes();
 
         (offsets, size)
     }
@@ -445,7 +433,7 @@ where
         scratch: Reg,
         ptr: &impl PtrSize,
     ) {
-        let sp = <A as ABI>::sp_reg();
+        let sp = <M::ABI as ABI>::sp_reg();
         masm.load(vm_runtime_limits_addr, scratch, OperandSize::S64);
         let addr = masm.address_at_reg(scratch, ptr.vmruntime_limits_last_wasm_entry_sp().into());
         masm.store(sp.into(), addr, OperandSize::S64);
@@ -469,13 +457,13 @@ where
         );
 
         // Handle the frame pointer.
-        let fp = <A as ABI>::fp_reg();
+        let fp = <M::ABI as ABI>::fp_reg();
         let fp_addr = masm.address_at_reg(fp, 0);
         masm.load(fp_addr, scratch, OperandSize::S64);
         masm.store(scratch.into(), last_wasm_exit_fp_addr, OperandSize::S64);
 
         // Handle the return address.
-        let ret_addr_offset = <A as ABI>::ret_addr_offset();
+        let ret_addr_offset = <M::ABI as ABI>::ret_addr_offset();
         let ret_addr = masm.address_at_reg(fp, ret_addr_offset.into());
         masm.load(ret_addr, scratch, OperandSize::S64);
         masm.store(scratch.into(), last_wasm_exit_pc_addr, OperandSize::S64);
diff --git a/winch/codegen/src/visitor.rs b/winch/codegen/src/visitor.rs
index ecf34576fa1d..65063c209fcc 100644
--- a/winch/codegen/src/visitor.rs
+++ b/winch/codegen/src/visitor.rs
@@ -4,13 +4,12 @@
 //! which validates and dispatches to the corresponding
 //! machine code emitter.
 
-use crate::abi::ABI;
 use crate::codegen::CodeGen;
 use crate::masm::{DivKind, MacroAssembler, OperandSize, RegImm, RemKind};
 use crate::stack::Val;
 use wasmparser::ValType;
 use wasmparser::VisitOperator;
-use wasmtime_environ::{FuncIndex, PtrSize};
+use wasmtime_environ::FuncIndex;
 
 /// A macro to define unsupported WebAssembly operators.
 ///
@@ -57,11 +56,9 @@ macro_rules! def_unsupported {
     (emit $unsupported:tt $($rest:tt)*) => {$($rest)*};
 }
 
-impl<'a, A, M, P> VisitOperator<'a> for CodeGen<'a, A, M, P>
+impl<'a, M> VisitOperator<'a> for CodeGen<'a, M>
 where
     M: MacroAssembler,
-    A: ABI,
-    P: PtrSize,
 {
     type Output = ();
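--

As promised above, a minimal, self-contained sketch of the
associated-type pattern this patch adopts. Everything here is a
simplified stand-in rather than the real winch definitions: `DemoABI`,
`DemoMasm`, `emit_call`, and the arithmetic inside `call` are invented
purely for illustration.

    /// Stand-in for winch's `ABI` trait: after this patch all of its
    /// methods are static, so no ABI value needs to be threaded around.
    trait ABI {
        fn call_stack_align() -> u8;
        fn arg_base_offset() -> u8;
        fn word_bytes() -> u32;
    }

    /// Stand-in for `wasmtime_environ::PtrSize`.
    trait PtrSize {
        fn size(&self) -> u8;
    }

    impl PtrSize for u8 {
        fn size(&self) -> u8 {
            *self
        }
    }

    /// The core of the change: `Ptr` and `ABI` ride along as associated
    /// types, so a single `M: MacroAssembler` bound replaces the old
    /// `<A, M, P>` parameter lists.
    trait MacroAssembler {
        type Ptr: PtrSize;
        type ABI: ABI;

        /// `alignment` and `addend` are no longer parameters; each
        /// implementation recovers them from its own `Self::ABI`.
        fn call(&mut self, stack_args_size: u32) -> u32 {
            let alignment: u32 = <Self::ABI as ABI>::call_stack_align().into();
            let addend: u32 = <Self::ABI as ABI>::arg_base_offset().into();
            // Placeholder for the real frame-adjustment logic.
            stack_args_size + alignment + addend
        }
    }

    struct DemoABI;

    impl ABI for DemoABI {
        fn call_stack_align() -> u8 {
            16
        }
        fn arg_base_offset() -> u8 {
            16
        }
        fn word_bytes() -> u32 {
            8
        }
    }

    struct DemoMasm;

    impl MacroAssembler for DemoMasm {
        type Ptr = u8;
        type ABI = DemoABI;
    }

    /// Call sites shrink accordingly: one generic parameter instead of
    /// three, with ABI details reached through `M::ABI`.
    fn emit_call<M: MacroAssembler>(masm: &mut M, args: u32) -> u32 {
        let _word = <M::ABI as ABI>::word_bytes();
        masm.call(args)
    }

    fn main() {
        let mut masm = DemoMasm;
        assert_eq!(emit_call(&mut masm, 8), 40);
    }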