From b6f2aa96ec315693dad6cda0f68fbf180a8763cf Mon Sep 17 00:00:00 2001
From: Chris Fallin <chris@cfallin.org>
Date: Thu, 25 Aug 2022 12:30:37 -0700
Subject: [PATCH] AArch64: port load operations to ISLE.

---
 cranelift/codegen/src/ir/dynamic_type.rs      |  17 +++
 cranelift/codegen/src/ir/mod.rs               |   2 +-
 cranelift/codegen/src/isa/aarch64/abi.rs      |   2 +-
 cranelift/codegen/src/isa/aarch64/inst.isle   |  67 ++++++++-
 .../codegen/src/isa/aarch64/inst/args.rs      |  15 --
 cranelift/codegen/src/isa/aarch64/lower.isle  |  99 +++++++++++++
 cranelift/codegen/src/isa/aarch64/lower.rs    |  35 -----
 .../codegen/src/isa/aarch64/lower/isle.rs     |  10 +-
 .../codegen/src/isa/aarch64/lower_inst.rs     |  83 +---------
 cranelift/codegen/src/machinst/isle.rs        |  31 ++++-
 cranelift/codegen/src/prelude.isle            |  15 +-
 .../filetests/isa/aarch64/amodes.clif         | 130 +++++++++---------
 .../filetests/isa/aarch64/heap_addr.clif      |  10 +-
 .../filetests/filetests/isa/aarch64/simd.clif |  10 +-
 14 files changed, 312 insertions(+), 214 deletions(-)

diff --git a/cranelift/codegen/src/ir/dynamic_type.rs b/cranelift/codegen/src/ir/dynamic_type.rs
index 91b13af98b34..f1ae30982114 100644
--- a/cranelift/codegen/src/ir/dynamic_type.rs
+++ b/cranelift/codegen/src/ir/dynamic_type.rs
@@ -1,6 +1,7 @@
 //! Dynamic IR types
 
 use crate::ir::entities::DynamicType;
+use crate::ir::types::*;
 use crate::ir::GlobalValue;
 use crate::ir::PrimaryMap;
 use crate::ir::Type;
@@ -36,3 +37,19 @@ impl DynamicTypeData {
 
 /// All allocated dynamic types.
 pub type DynamicTypes = PrimaryMap<DynamicType, DynamicTypeData>;
+
+/// Convert a dynamic-vector type to a fixed-vector type.
+pub fn dynamic_to_fixed(ty: Type) -> Type {
+    match ty {
+        I8X8XN => I8X8,
+        I8X16XN => I8X16,
+        I16X4XN => I16X4,
+        I16X8XN => I16X8,
+        I32X2XN => I32X2,
+        I32X4XN => I32X4,
+        I64X2XN => I64X2,
+        F32X4XN => F32X4,
+        F64X2XN => F64X2,
+        _ => unreachable!("unhandled type: {}", ty),
+    }
+}
diff --git a/cranelift/codegen/src/ir/mod.rs b/cranelift/codegen/src/ir/mod.rs
index 5dc5ad612da2..8ba18987daed 100644
--- a/cranelift/codegen/src/ir/mod.rs
+++ b/cranelift/codegen/src/ir/mod.rs
@@ -35,7 +35,7 @@ pub use crate::ir::builder::{
 };
 pub use crate::ir::constant::{ConstantData, ConstantPool};
 pub use crate::ir::dfg::{DataFlowGraph, ValueDef};
-pub use crate::ir::dynamic_type::{DynamicTypeData, DynamicTypes};
+pub use crate::ir::dynamic_type::{dynamic_to_fixed, DynamicTypeData, DynamicTypes};
 pub use crate::ir::entities::{
     Block, Constant, DynamicStackSlot, DynamicType, FuncRef, GlobalValue, Heap, Immediate, Inst,
     JumpTable, SigRef, StackSlot, Table, UserExternalNameRef, Value,
diff --git a/cranelift/codegen/src/isa/aarch64/abi.rs b/cranelift/codegen/src/isa/aarch64/abi.rs
index b30c4e5cb55e..59bea0c8a20b 100644
--- a/cranelift/codegen/src/isa/aarch64/abi.rs
+++ b/cranelift/codegen/src/isa/aarch64/abi.rs
@@ -5,7 +5,7 @@ use crate::ir::types;
 use crate::ir::types::*;
 use crate::ir::MemFlags;
 use crate::ir::Opcode;
-use crate::ir::{ExternalName, LibCall, Signature};
+use crate::ir::{dynamic_to_fixed, ExternalName, LibCall, Signature};
 use crate::isa;
 use crate::isa::aarch64::{inst::EmitState, inst::*, settings as aarch64_settings};
 use crate::isa::unwind::UnwindInst;
diff --git a/cranelift/codegen/src/isa/aarch64/inst.isle b/cranelift/codegen/src/isa/aarch64/inst.isle
index db111ba12e47..42753f83c42a 100644
--- a/cranelift/codegen/src/isa/aarch64/inst.isle
+++ b/cranelift/codegen/src/isa/aarch64/inst.isle
@@ -1492,9 +1492,15 @@
 
 ;; Lower the address of a load or a store.
 (decl amode (Type Inst u32) AMode)
-;; TODO: Port lower_address() to ISLE.
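+;; The `amode` constructor computes an addressing mode from the type
+;; being accessed, the load/store instruction itself, and the
+;; instruction's immediate offset.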
+;; TODO: Port lower_address() to ISLE. We should take a single `Value`
+;; here, not an `Inst`; complex loads/stores with multiple addends
+;; directly on the instruction are legalized into loads/stores with
+;; only one address input.
 (extern constructor amode amode)
 
+(decl pair_amode (Inst u32) PairAMode)
+(extern constructor pair_amode pair_amode)
+
 ;; Matches an `AMode` that is just a register.
 (decl pure amode_is_reg (AMode) Reg)
 ;; TODO: Implement in ISLE.
@@ -2307,6 +2313,65 @@
 (rule (udf trap_code)
       (SideEffectNoResult.Inst (MInst.Udf trap_code)))
 
+;; Helpers for generating various load instructions, with varying
+;; widths and sign/zero-extending properties.
+(decl aarch64_uload8 (AMode MemFlags) Reg)
+(rule (aarch64_uload8 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.ULoad8 dst amode flags))))
+        dst))
+(decl aarch64_sload8 (AMode MemFlags) Reg)
+(rule (aarch64_sload8 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.SLoad8 dst amode flags))))
+        dst))
+(decl aarch64_uload16 (AMode MemFlags) Reg)
+(rule (aarch64_uload16 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.ULoad16 dst amode flags))))
+        dst))
+(decl aarch64_sload16 (AMode MemFlags) Reg)
+(rule (aarch64_sload16 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.SLoad16 dst amode flags))))
+        dst))
+(decl aarch64_uload32 (AMode MemFlags) Reg)
+(rule (aarch64_uload32 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.ULoad32 dst amode flags))))
+        dst))
+(decl aarch64_sload32 (AMode MemFlags) Reg)
+(rule (aarch64_sload32 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.SLoad32 dst amode flags))))
+        dst))
+(decl aarch64_uload64 (AMode MemFlags) Reg)
+(rule (aarch64_uload64 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.ULoad64 dst amode flags))))
+        dst))
+(decl aarch64_fpuload32 (AMode MemFlags) Reg)
+(rule (aarch64_fpuload32 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $F64))
+            (_ Unit (emit (MInst.FpuLoad32 dst amode flags))))
+        dst))
+(decl aarch64_fpuload64 (AMode MemFlags) Reg)
+(rule (aarch64_fpuload64 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $F64))
+            (_ Unit (emit (MInst.FpuLoad64 dst amode flags))))
+        dst))
+(decl aarch64_fpuload128 (AMode MemFlags) Reg)
+(rule (aarch64_fpuload128 amode flags)
+      (let ((dst WritableReg (temp_writable_reg $F64X2))
+            (_ Unit (emit (MInst.FpuLoad128 dst amode flags))))
+        dst))
+(decl aarch64_loadp64 (PairAMode MemFlags) ValueRegs)
+(rule (aarch64_loadp64 amode flags)
+      (let ((dst1 WritableReg (temp_writable_reg $I64))
+            (dst2 WritableReg (temp_writable_reg $I64))
+            (_ Unit (emit (MInst.LoadP64 dst1 dst2 amode flags))))
+        (value_regs dst1 dst2)))
+
 ;; Immediate value helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ;; Type of extension performed by an immediate helper
diff --git a/cranelift/codegen/src/isa/aarch64/inst/args.rs b/cranelift/codegen/src/isa/aarch64/inst/args.rs
index ce2d70c0925e..4428be2a8370 100644
--- a/cranelift/codegen/src/isa/aarch64/inst/args.rs
+++ b/cranelift/codegen/src/isa/aarch64/inst/args.rs
@@ -773,18 +773,3 @@ impl VectorSize {
         }
     }
 }
-
-pub(crate) fn dynamic_to_fixed(ty: Type) -> Type {
-    match ty {
-        I8X8XN => I8X8,
-        I8X16XN => I8X16,
-        I16X4XN => I16X4,
-        I16X8XN => I16X8,
-        I32X2XN => I32X2,
-        I32X4XN => I32X4,
-        I64X2XN => I64X2,
-        F32X4XN => F32X4,
-        F64X2XN => F64X2,
-        _ => unreachable!("unhandled type: {}", ty),
-    }
-}
diff --git a/cranelift/codegen/src/isa/aarch64/lower.isle b/cranelift/codegen/src/isa/aarch64/lower.isle
index 5457039059c1..54620706f180 100644
--- a/cranelift/codegen/src/isa/aarch64/lower.isle
+++ b/cranelift/codegen/src/isa/aarch64/lower.isle
@@ -2016,3 +2016,102 @@
 
 (rule (lower (get_return_address))
       (aarch64_link))
+
+;;;; Rules for loads ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(rule (lower
+       inst @ (has_type $I8 (load flags address offset)))
+      (aarch64_uload8 (amode $I8 inst offset) flags))
+(rule (lower
+       inst @ (has_type $I16 (load flags address offset)))
+      (aarch64_uload16 (amode $I16 inst offset) flags))
+(rule (lower
+       inst @ (has_type $I32 (load flags address offset)))
+      (aarch64_uload32 (amode $I32 inst offset) flags))
+(rule (lower
+       inst @ (has_type $I64 (load flags address offset)))
+      (aarch64_uload64 (amode $I64 inst offset) flags))
+(rule (lower
+       inst @ (has_type $R64 (load flags address offset)))
+      (aarch64_uload64 (amode $I64 inst offset) flags))
+(rule (lower
+       inst @ (has_type $F32 (load flags address offset)))
+      (aarch64_fpuload32 (amode $F32 inst offset) flags))
+(rule (lower
+       inst @ (has_type $F64 (load flags address offset)))
+      (aarch64_fpuload64 (amode $F64 inst offset) flags))
+(rule (lower
+       inst @ (has_type $I128 (load flags address offset)))
+      (aarch64_loadp64 (pair_amode inst offset) flags))
+(rule (lower
+       inst @ (has_type (ty_vec64 _)
+                        (load flags address offset)))
+      (aarch64_fpuload64 (amode $F64 inst offset) flags))
+(rule (lower
+       inst @ (has_type (ty_vec128 _)
+                        (load flags address offset)))
+      (aarch64_fpuload128 (amode $I8X16 inst offset) flags))
+(rule (lower
+       inst @ (has_type (ty_dyn_vec64 _)
+                        (load flags address offset)))
+      (aarch64_fpuload64 (amode $F64 inst offset) flags))
+(rule (lower
+       inst @ (has_type (ty_dyn_vec128 _)
+                        (load flags address offset)))
+      (aarch64_fpuload128 (amode $I8X16 inst offset) flags))
+
+(rule (lower
+       inst @ (uload8 flags address offset))
+      (aarch64_uload8 (amode $I8 inst offset) flags))
+(rule (lower
+       inst @ (sload8 flags address offset))
+      (aarch64_sload8 (amode $I8 inst offset) flags))
+(rule (lower
+       inst @ (uload16 flags address offset))
+      (aarch64_uload16 (amode $I16 inst offset) flags))
+(rule (lower
+       inst @ (sload16 flags address offset))
+      (aarch64_sload16 (amode $I16 inst offset) flags))
+(rule (lower
+       inst @ (uload32 flags address offset))
+      (aarch64_uload32 (amode $I32 inst offset) flags))
+(rule (lower
+       inst @ (sload32 flags address offset))
+      (aarch64_sload32 (amode $I32 inst offset) flags))
+
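+;; The wide extending loads (`sload8x8`, `uload16x4`, etc.) below are
+;; lowered as a 64-bit load into a vector register followed by a
+;; lane-wise extend (`sxtl`/`uxtl`). Note that we treat these vector
+;; loads as scalar floating-point loads, which is correct in a
+;; little-endian environment.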
+(rule (lower
+       inst @ (sload8x8 flags address offset))
+      (vec_extend (VecExtendOp.Sxtl)
+                  (aarch64_fpuload64 (amode $F64 inst offset) flags)
+                  $false
+                  (ScalarSize.Size16)))
+(rule (lower
+       inst @ (uload8x8 flags address offset))
+      (vec_extend (VecExtendOp.Uxtl)
+                  (aarch64_fpuload64 (amode $F64 inst offset) flags)
+                  $false
+                  (ScalarSize.Size16)))
+(rule (lower
+       inst @ (sload16x4 flags address offset))
+      (vec_extend (VecExtendOp.Sxtl)
+                  (aarch64_fpuload64 (amode $F64 inst offset) flags)
+                  $false
+                  (ScalarSize.Size32)))
+(rule (lower
+       inst @ (uload16x4 flags address offset))
+      (vec_extend (VecExtendOp.Uxtl)
+                  (aarch64_fpuload64 (amode $F64 inst offset) flags)
+                  $false
+                  (ScalarSize.Size32)))
+(rule (lower
+       inst @ (sload32x2 flags address offset))
+      (vec_extend (VecExtendOp.Sxtl)
+                  (aarch64_fpuload64 (amode $F64 inst offset) flags)
+                  $false
+                  (ScalarSize.Size64)))
+(rule (lower
+       inst @ (uload32x2 flags address offset))
+      (vec_extend (VecExtendOp.Uxtl)
+                  (aarch64_fpuload64 (amode $F64 inst offset) flags)
+                  $false
+                  (ScalarSize.Size64)))
diff --git a/cranelift/codegen/src/isa/aarch64/lower.rs b/cranelift/codegen/src/isa/aarch64/lower.rs
index 3ec6bf3bbe99..4980167e5987 100644
--- a/cranelift/codegen/src/isa/aarch64/lower.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower.rs
@@ -1452,41 +1452,6 @@ pub(crate) fn materialize_bool_result(
     }
 }
 
-fn load_op_to_ty(op: Opcode) -> Option<Type> {
-    match op {
-        Opcode::Sload8 | Opcode::Uload8 => Some(I8),
-        Opcode::Sload16 | Opcode::Uload16 => Some(I16),
-        Opcode::Sload32 | Opcode::Uload32 => Some(I32),
-        Opcode::Load => None,
-        Opcode::Sload8x8 | Opcode::Uload8x8 => Some(I8X8),
-        Opcode::Sload16x4 | Opcode::Uload16x4 => Some(I16X4),
-        Opcode::Sload32x2 | Opcode::Uload32x2 => Some(I32X2),
-        _ => None,
-    }
-}
-
-/// Helper to lower a load instruction; this is used in several places, because
-/// a load can sometimes be merged into another operation.
-pub(crate) fn lower_load<
-    F: FnMut(&mut Lower<Inst>, ValueRegs<Writable<Reg>>, Type, AMode) -> CodegenResult<()>,
->(
-    ctx: &mut Lower<Inst>,
-    ir_inst: IRInst,
-    inputs: &[InsnInput],
-    output: InsnOutput,
-    mut f: F,
-) -> CodegenResult<()> {
-    let op = ctx.data(ir_inst).opcode();
-
-    let elem_ty = load_op_to_ty(op).unwrap_or_else(|| ctx.output_ty(ir_inst, 0));
-
-    let off = ctx.data(ir_inst).load_store_offset().unwrap();
-    let mem = lower_address(ctx, elem_ty, &inputs[..], off);
-    let rd = get_output_reg(ctx, output);
-
-    f(ctx, rd, elem_ty, mem)
-}
-
 //=============================================================================
 // Lowering-backend trait implementation.
 
diff --git a/cranelift/codegen/src/isa/aarch64/lower/isle.rs b/cranelift/codegen/src/isa/aarch64/lower/isle.rs
index 1e190a3fc765..338bc8c73ce9 100644
--- a/cranelift/codegen/src/isa/aarch64/lower/isle.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower/isle.rs
@@ -12,7 +12,7 @@ use super::{
     PairAMode, Reg, ScalarSize, ShiftOpAndAmt, UImm5, VecMisc2, VectorSize, NZCV,
 };
 use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm};
-use crate::isa::aarch64::lower::{lower_address, lower_splat_const};
+use crate::isa::aarch64::lower::{lower_address, lower_pair_address, lower_splat_const};
 use crate::isa::aarch64::settings::Flags as IsaFlags;
 use crate::machinst::{isle::*, InputSourceInst};
 use crate::settings::Flags;
@@ -481,6 +481,14 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6>
         )
     }
 
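+    // Analogous to `amode` above, but computes a `PairAMode`: the
+    // addressing mode of a register-pair access (`ldp`/`stp`), as used
+    // for `$I128` loads.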
+    fn pair_amode(&mut self, mem_op: Inst, offset: u32) -> PairAMode {
+        lower_pair_address(
+            self.lower_ctx,
+            &insn_inputs(self.lower_ctx, mem_op)[..],
+            offset as i32,
+        )
+    }
+
     fn amode_is_reg(&mut self, address: &AMode) -> Option<Reg> {
         address.is_reg()
     }
diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
index 10f8f3516df0..be67989aa327 100644
--- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
@@ -4,7 +4,7 @@ use super::lower::*;
 use crate::binemit::CodeOffset;
 use crate::ir::types::*;
 use crate::ir::Inst as IRInst;
-use crate::ir::{InstructionData, Opcode};
+use crate::ir::{dynamic_to_fixed, InstructionData, Opcode};
 use crate::isa::aarch64::abi::*;
 use crate::isa::aarch64::inst::*;
 use crate::isa::aarch64::settings as aarch64_settings;
@@ -103,86 +103,7 @@ pub(crate) fn lower_insn_to_regs(
         | Opcode::Sload16x4
         | Opcode::Uload16x4
         | Opcode::Sload32x2
-        | Opcode::Uload32x2 => {
-            let sign_extend = match op {
-                Opcode::Sload8 | Opcode::Sload16 | Opcode::Sload32 => true,
-                _ => false,
-            };
-            let flags = ctx
-                .memflags(insn)
-                .expect("Load instruction should have memflags");
-
-            let out_ty = ctx.output_ty(insn, 0);
-            if out_ty == I128 {
-                let off = ctx.data(insn).load_store_offset().unwrap();
-                let mem = lower_pair_address(ctx, &inputs[..], off);
-                let dst = get_output_reg(ctx, outputs[0]);
-                ctx.emit(Inst::LoadP64 {
-                    rt: dst.regs()[0],
-                    rt2: dst.regs()[1],
-                    mem,
-                    flags,
-                });
-            } else {
-                lower_load(
-                    ctx,
-                    insn,
-                    &inputs[..],
-                    outputs[0],
-                    |ctx, dst, mut elem_ty, mem| {
-                        if elem_ty.is_dynamic_vector() {
-                            elem_ty = dynamic_to_fixed(elem_ty);
-                        }
-                        let rd = dst.only_reg().unwrap();
-                        let is_float = ty_has_float_or_vec_representation(elem_ty);
-                        ctx.emit(match (ty_bits(elem_ty), sign_extend, is_float) {
-                            (1, _, _) => Inst::ULoad8 { rd, mem, flags },
-                            (8, false, _) => Inst::ULoad8 { rd, mem, flags },
-                            (8, true, _) => Inst::SLoad8 { rd, mem, flags },
-                            (16, false, _) => Inst::ULoad16 { rd, mem, flags },
-                            (16, true, _) => Inst::SLoad16 { rd, mem, flags },
-                            (32, false, false) => Inst::ULoad32 { rd, mem, flags },
-                            (32, true, false) => Inst::SLoad32 { rd, mem, flags },
-                            (32, _, true) => Inst::FpuLoad32 { rd, mem, flags },
-                            (64, _, false) => Inst::ULoad64 { rd, mem, flags },
-                            // Note that we treat some of the vector loads as scalar floating-point loads,
-                            // which is correct in a little endian environment.
-                            (64, _, true) => Inst::FpuLoad64 { rd, mem, flags },
-                            (128, _, true) => Inst::FpuLoad128 { rd, mem, flags },
-                            _ => {
-                                return Err(CodegenError::Unsupported(format!(
-                                    "Unsupported type in load: {:?}",
-                                    elem_ty
-                                )))
-                            }
-                        });
-
-                        let vec_extend = match op {
-                            Opcode::Sload8x8 => Some((VecExtendOp::Sxtl, ScalarSize::Size16)),
-                            Opcode::Uload8x8 => Some((VecExtendOp::Uxtl, ScalarSize::Size16)),
-                            Opcode::Sload16x4 => Some((VecExtendOp::Sxtl, ScalarSize::Size32)),
-                            Opcode::Uload16x4 => Some((VecExtendOp::Uxtl, ScalarSize::Size32)),
-                            Opcode::Sload32x2 => Some((VecExtendOp::Sxtl, ScalarSize::Size64)),
-                            Opcode::Uload32x2 => Some((VecExtendOp::Uxtl, ScalarSize::Size64)),
-                            _ => None,
-                        };
-
-                        if let Some((t, lane_size)) = vec_extend {
-                            let rd = dst.only_reg().unwrap();
-                            ctx.emit(Inst::VecExtend {
-                                t,
-                                rd,
-                                rn: rd.to_reg(),
-                                high_half: false,
-                                lane_size,
-                            });
-                        }
-
-                        Ok(())
-                    },
-                )?;
-            }
-        }
+        | Opcode::Uload32x2 => implemented_in_isle(ctx),
 
         Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => {
             let off = ctx.data(insn).load_store_offset().unwrap();
diff --git a/cranelift/codegen/src/machinst/isle.rs b/cranelift/codegen/src/machinst/isle.rs
index 4d9fe9fe20dd..a848b4e742ca 100644
--- a/cranelift/codegen/src/machinst/isle.rs
+++ b/cranelift/codegen/src/machinst/isle.rs
@@ -9,8 +9,8 @@ use target_lexicon::Triple;
 pub use super::MachLabel;
 pub use crate::data_value::DataValue;
 pub use crate::ir::{
-    ArgumentExtension, Constant, DynamicStackSlot, ExternalName, FuncRef, GlobalValue, Immediate,
-    SigRef, StackSlot,
+    dynamic_to_fixed, ArgumentExtension, Constant, DynamicStackSlot, ExternalName, FuncRef,
+    GlobalValue, Immediate, SigRef, StackSlot,
 };
 pub use crate::isa::unwind::UnwindInst;
 pub use crate::machinst::{
@@ -361,6 +361,15 @@ macro_rules! isle_prelude_methods {
         }
     }
 
+    #[inline]
+    fn ty_vec64_ctor(&mut self, ty: Type) -> Option<Type> {
+        if ty.is_vector() && ty.bits() == 64 {
+            Some(ty)
+        } else {
+            None
+        }
+    }
+
     #[inline]
     fn ty_vec64(&mut self, ty: Type) -> Option<Type> {
         if ty.is_vector() && ty.bits() == 64 {
             Some(ty)
         } else {
             None
         }
     }
 
@@ -379,6 +388,24 @@ macro_rules! isle_prelude_methods {
         }
     }
 
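+    // `ty_dyn_vec64`/`ty_dyn_vec128` below mirror `ty_vec64`/`ty_vec128`
+    // above, but match dynamic vector types, classified by the width of
+    // the corresponding fixed vector type. For example, a backend
+    // lowering rule can match on them like so (see the AArch64
+    // backend's `lower.isle`):
+    //
+    //   (rule (lower
+    //          inst @ (has_type (ty_dyn_vec64 _)
+    //                           (load flags address offset)))
+    //         (aarch64_fpuload64 (amode $F64 inst offset) flags))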
isle_prelude_methods { } } + #[inline] + fn ty_dyn_vec64(&mut self, ty: Type) -> Option { + if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 64 { + Some(ty) + } else { + None + } + } + + #[inline] + fn ty_dyn_vec128(&mut self, ty: Type) -> Option { + if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 128 { + Some(ty) + } else { + None + } + } + #[inline] fn ty_vec64_int(&mut self, ty: Type) -> Option { if ty.is_vector() && ty.bits() == 64 && ty.lane_type().is_int() { diff --git a/cranelift/codegen/src/prelude.isle b/cranelift/codegen/src/prelude.isle index f5caeb94b783..67e98cb16ac0 100644 --- a/cranelift/codegen/src/prelude.isle +++ b/cranelift/codegen/src/prelude.isle @@ -365,14 +365,25 @@ (decl ty_scalar_float (Type) Type) (extern extractor ty_scalar_float ty_scalar_float) -;; A pure constructor that only matches 64-bit vector types. +;; A pure constructor/extractor that only matches 64-bit vector types. (decl pure ty_vec64 (Type) Type) -(extern constructor ty_vec64 ty_vec64) +(extern constructor ty_vec64 ty_vec64_ctor) +(extern extractor ty_vec64 ty_vec64) ;; An extractor that only matches 128-bit vector types. (decl ty_vec128 (Type) Type) (extern extractor ty_vec128 ty_vec128) +;; An extractor that only matches dynamic vector types with a 64-bit +;; base type. +(decl ty_dyn_vec64 (Type) Type) +(extern extractor ty_dyn_vec64 ty_dyn_vec64) + +;; An extractor that only matches dynamic vector types with a 128-bit +;; base type. +(decl ty_dyn_vec128 (Type) Type) +(extern extractor ty_dyn_vec128 ty_dyn_vec128) + ;; An extractor that only matches 64-bit vector types with integer ;; lanes (I8X8, I16X4, I32X2) (decl ty_vec64_int (Type) Type) diff --git a/cranelift/filetests/filetests/isa/aarch64/amodes.clif b/cranelift/filetests/filetests/isa/aarch64/amodes.clif index c3254cc9469a..83b7b96bc206 100644 --- a/cranelift/filetests/filetests/isa/aarch64/amodes.clif +++ b/cranelift/filetests/filetests/isa/aarch64/amodes.clif @@ -36,8 +36,8 @@ block0(v0: i32, v1: i32): } ; block0: -; mov w6, w0 -; ldr w0, [x6, w1, UXTW] +; mov w5, w0 +; ldr w0, [x5, w1, UXTW] ; ret function %f8(i64, i32) -> i32 { @@ -52,10 +52,10 @@ block0(v0: i64, v1: i32): } ; block0: -; add x6, x0, #68 -; add x6, x6, x0 -; add x6, x6, x1, SXTW -; ldr w0, [x6, w1, SXTW] +; add x5, x0, #68 +; add x5, x5, x0 +; add x5, x5, x1, SXTW +; ldr w0, [x5, w1, SXTW] ; ret function %f9(i64, i64, i64) -> i32 { @@ -85,10 +85,10 @@ block0(v0: i64, v1: i64, v2: i64): } ; block0: -; movz x8, #4100 -; add x8, x8, x1 -; add x8, x8, x2 -; ldr w0, [x8, x0] +; movz x7, #4100 +; add x7, x7, x1 +; add x7, x7, x2 +; ldr w0, [x7, x0] ; ret function %f10() -> i32 { @@ -99,8 +99,8 @@ block0: } ; block0: -; movz x2, #1234 -; ldr w0, [x2] +; movz x1, #1234 +; ldr w0, [x1] ; ret function %f11(i64) -> i32 { @@ -112,8 +112,8 @@ block0(v0: i64): } ; block0: -; add x4, x0, #8388608 -; ldr w0, [x4] +; add x3, x0, #8388608 +; ldr w0, [x3] ; ret function %f12(i64) -> i32 { @@ -125,8 +125,8 @@ block0(v0: i64): } ; block0: -; sub x4, x0, #4 -; ldr w0, [x4] +; sub x3, x0, #4 +; ldr w0, [x3] ; ret function %f13(i64) -> i32 { @@ -138,10 +138,10 @@ block0(v0: i64): } ; block0: -; movz w4, #51712 -; movk w4, #15258, LSL #16 -; add x4, x4, x0 -; ldr w0, [x4] +; movz w3, #51712 +; movk w3, #15258, LSL #16 +; add x3, x3, x0 +; ldr w0, [x3] ; ret function %f14(i32) -> i32 { @@ -152,8 +152,8 @@ block0(v0: i32): } ; block0: -; sxtw x4, w0 -; ldr w0, [x4] +; sxtw x3, w0 +; ldr w0, [x3] ; ret function %f15(i32, i32) -> i32 { @@ -166,8 +166,8 @@ block0(v0: i32, v1: 
 }
 
 ; block0:
-; sxtw x6, w0
-; ldr w0, [x6, w1, SXTW]
+; sxtw x5, w0
+; ldr w0, [x5, w1, SXTW]
 ; ret
 
 function %f18(i64, i64, i64) -> i32 {
 block0(v0: i64, v1: i64, v2: i64):
 }
 
 ; block0:
-; movn w8, #4097
-; ldrsh x0, [x8]
+; movn w7, #4097
+; ldrsh x0, [x7]
 ; ret
 
 function %f19(i64, i64, i64) -> i32 {
 block0(v0: i64, v1: i64, v2: i64):
 }
 
 ; block0:
-; movz x8, #4098
-; ldrsh x0, [x8]
+; movz x7, #4098
+; ldrsh x0, [x7]
 ; ret
 
 function %f20(i64, i64, i64) -> i32 {
 block0(v0: i64, v1: i64, v2: i64):
 }
 
 ; block0:
-; movn w8, #4097
-; sxtw x10, w8
-; ldrsh x0, [x10]
+; movn w7, #4097
+; sxtw x9, w7
+; ldrsh x0, [x9]
 ; ret
 
 function %f21(i64, i64, i64) -> i32 {
 block0(v0: i64, v1: i64, v2: i64):
 }
 
 ; block0:
-; movz x8, #4098
-; sxtw x10, w8
-; ldrsh x0, [x10]
+; movz x7, #4098
+; sxtw x9, w7
+; ldrsh x0, [x9]
 ; ret
 
 function %i128(i64) -> i128 {
 block0(v0: i64):
 }
 
 ; block0:
-; mov x8, x0
-; ldp x3, x1, [x8]
-; mov x11, x3
+; mov x6, x0
+; ldp x7, x1, [x6]
+; mov x11, x7
 ; stp x11, x1, [x0]
-; mov x0, x3
+; mov x0, x7
 ; ret
 
 function %i128_imm_offset(i64) -> i128 {
 block0(v0: i64):
 }
 
 ; block0:
-; mov x8, x0
-; ldp x3, x1, [x8, #16]
-; mov x11, x3
+; mov x6, x0
+; ldp x7, x1, [x6, #16]
+; mov x11, x7
 ; stp x11, x1, [x0, #16]
-; mov x0, x3
+; mov x0, x7
 ; ret
 
 function %i128_imm_offset_large(i64) -> i128 {
 block0(v0: i64):
 }
 
 ; block0:
-; mov x8, x0
-; ldp x3, x1, [x8, #504]
-; mov x11, x3
+; mov x6, x0
+; ldp x7, x1, [x6, #504]
+; mov x11, x7
 ; stp x11, x1, [x0, #504]
-; mov x0, x3
+; mov x0, x7
 ; ret
 
 function %i128_imm_offset_negative_large(i64) -> i128 {
 block0(v0: i64):
 }
 
 ; block0:
-; mov x8, x0
-; ldp x3, x1, [x8, #-512]
-; mov x11, x3
+; mov x6, x0
+; ldp x7, x1, [x6, #-512]
+; mov x11, x7
 ; stp x11, x1, [x0, #-512]
-; mov x0, x3
+; mov x0, x7
 ; ret
 
 function %i128_add_offset(i64) -> i128 {
 block0(v0: i64):
 }
 
 ; block0:
-; mov x8, x0
-; ldp x3, x1, [x8, #32]
-; mov x11, x3
+; mov x6, x0
+; ldp x7, x1, [x6, #32]
+; mov x11, x7
 ; stp x11, x1, [x0, #32]
-; mov x0, x3
+; mov x0, x7
 ; ret
 
 function %i128_32bit_sextend_simple(i32) -> i128 {
 block0(v0: i32):
 }
 
 ; block0:
-; sxtw x8, w0
-; ldp x4, x1, [x8]
-; sxtw x9, w0
-; mov x0, x4
-; stp x0, x1, [x9]
+; sxtw x6, w0
+; ldp x10, x1, [x6]
+; sxtw x7, w0
+; mov x0, x10
+; stp x0, x1, [x7]
 ; ret
 
 function %i128_32bit_sextend(i64, i32) -> i128 {
 block0(v0: i64, v1: i32):
 }
 
 ; block0:
-; mov x10, x0
-; add x10, x10, x1, SXTW
-; ldp x6, x7, [x10, #24]
+; mov x8, x0
+; add x8, x8, x1, SXTW
+; ldp x10, x11, [x8, #24]
 ; add x0, x0, x1, SXTW
-; mov x15, x6
-; mov x1, x7
+; mov x15, x10
+; mov x1, x11
 ; stp x15, x1, [x0, #24]
-; mov x0, x6
+; mov x0, x10
 ; ret
 
diff --git a/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif b/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif
index c8056c3d9e0b..74080e6d4126 100644
--- a/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif
@@ -15,15 +15,15 @@ block0(v0: i64, v1: i32):
 
 ; block0:
 ; mov w10, w1
-; ldr x5, [x0]
-; mov x11, x5
+; ldr x11, [x0]
+; mov x11, x11
 ; subs xzr, x10, x11
 ; b.ls label1 ; b label2
 ; block1:
-; add x13, x0, x1, UXTW
+; add x12, x0, x1, UXTW
 ; subs xzr, x10, x11
-; movz x14, #0
-; csel x0, x14, x13, hi
+; movz x13, #0
+; csel x0, x13, x12, hi
 ; csdb
 ; ret
 ; block2:
diff --git a/cranelift/filetests/filetests/isa/aarch64/simd.clif b/cranelift/filetests/filetests/isa/aarch64/simd.clif
index 166d27b80b08..b26811e6fad8 100644
--- a/cranelift/filetests/filetests/isa/aarch64/simd.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/simd.clif
@@ -86,9 +86,9 @@ block0(v0: i64, v1: i64):
 }
 
 ; block0:
-; ldrb w4, [x0]
-; ld1r { v0.16b }, [x1]
-; dup v1.16b, w4
+; ldrb w8, [x0]
+; ld1r { v0.16b }, [x1]
+; dup v1.16b, w8
 ; ret
 
 function %f8(i64, i64) -> i8x16, i8x16 {
 block0(v0: i64, v1: i64):
 }
 
 ; block0:
-; ldrb w4, [x0]
-; dup v0.16b, w4
-; dup v1.16b, w4
+; ldrb w8, [x0]
+; dup v0.16b, w8
+; dup v1.16b, w8
 ; ret
 
 function %f9() -> i32x2 {