From 4b3b0cac0bb7a2e02b2c7ec86c68688e0ce91748 Mon Sep 17 00:00:00 2001
From: Laurence Tratt
Date: Mon, 20 Jan 2025 15:01:32 +0000
Subject: [PATCH] Be somewhat clearer about upper bits in the codegen.

This is a bit of a hodgepodge of changes, but there are three major
things:

1. Get rid of all the "weird" sized ints, none of which we see in
   practice, and which we thus have low confidence are correct.

2. Simplify some of the zero extension stuff.

3. Zero extend all values to 64 bits before a call. We do this crudely
   for now.
---
 .../compile/jitc_yk/codegen/x64/lsregalloc.rs |  32 +-
 ykrt/src/compile/jitc_yk/codegen/x64/mod.rs   | 473 +++++++-----------
 ykrt/src/compile/jitc_yk/jit_ir/mod.rs        |  35 +-
 .../src/compile/jitc_yk/jit_ir/well_formed.rs |   8 +-
 4 files changed, 233 insertions(+), 315 deletions(-)

diff --git a/ykrt/src/compile/jitc_yk/codegen/x64/lsregalloc.rs b/ykrt/src/compile/jitc_yk/codegen/x64/lsregalloc.rs
index 22c0ff832..f87d91ecf 100644
--- a/ykrt/src/compile/jitc_yk/codegen/x64/lsregalloc.rs
+++ b/ykrt/src/compile/jitc_yk/codegen/x64/lsregalloc.rs
@@ -829,15 +829,17 @@ impl LSRegAlloc<'_> {
             RegState::FromInst(iidx) => {
                 if self.spills[usize::from(iidx)] == SpillState::Empty {
                     let inst = self.m.inst(iidx);
-                    let size = inst.def_byte_size(self.m);
-                    self.stack.align(size); // FIXME
-                    let frame_off = self.stack.grow(size);
+                    let bitw = inst.def_bitw(self.m);
+                    let bytew = inst.def_byte_size(self.m);
+                    debug_assert!(bitw >= bytew);
+                    self.stack.align(bytew);
+                    let frame_off = self.stack.grow(bytew);
                     let off = i32::try_from(frame_off).unwrap();
-                    match size {
-                        1 => dynasm!(asm; mov BYTE [rbp - off], Rb(reg.code())),
-                        2 => dynasm!(asm; mov WORD [rbp - off], Rw(reg.code())),
-                        4 => dynasm!(asm; mov DWORD [rbp - off], Rd(reg.code())),
-                        8 => dynasm!(asm; mov QWORD [rbp - off], Rq(reg.code())),
+                    match bitw {
+                        8 => dynasm!(asm; mov BYTE [rbp - off], Rb(reg.code())),
+                        16 => dynasm!(asm; mov WORD [rbp - off], Rw(reg.code())),
+                        32 => dynasm!(asm; mov DWORD [rbp - off], Rd(reg.code())),
+                        64 => dynasm!(asm; mov QWORD [rbp - off], Rq(reg.code())),
                         _ => unreachable!(),
                     }
                     self.spills[usize::from(iidx)] = SpillState::Stack(off);
@@ -1380,13 +1382,15 @@ impl LSRegAlloc<'_> {
             RegState::FromInst(iidx) => {
                 if self.spills[usize::from(iidx)] == SpillState::Empty {
                     let inst = self.m.inst(iidx);
-                    let size = inst.def_byte_size(self.m);
-                    self.stack.align(size); // FIXME
-                    let frame_off = self.stack.grow(size);
+                    let bitw = inst.def_bitw(self.m);
+                    let bytew = inst.def_byte_size(self.m);
+                    debug_assert!(bitw >= bytew);
+                    self.stack.align(bytew);
+                    let frame_off = self.stack.grow(bytew);
                     let off = i32::try_from(frame_off).unwrap();
-                    match size {
-                        4 => dynasm!(asm ; movss [rbp - off], Rx(reg.code())),
-                        8 => dynasm!(asm ; movsd [rbp - off], Rx(reg.code())),
+                    match bitw {
+                        32 => dynasm!(asm ; movss [rbp - off], Rx(reg.code())),
+                        64 => dynasm!(asm ; movsd [rbp - off], Rx(reg.code())),
                         _ => unreachable!(),
                     }
                     self.spills[usize::from(iidx)] = SpillState::Stack(off);
diff --git a/ykrt/src/compile/jitc_yk/codegen/x64/mod.rs b/ykrt/src/compile/jitc_yk/codegen/x64/mod.rs
index 03bdab919..f711a2125 100644
--- a/ykrt/src/compile/jitc_yk/codegen/x64/mod.rs
+++ b/ykrt/src/compile/jitc_yk/codegen/x64/mod.rs
@@ -11,14 +11,8 @@
 //!   partial register stalls in the CPU pipeline. Generally speaking we prefer to use 64-bit
 //!   operations, but sometimes we special-case 32-bit operations since they are so common.
 //!
-//! * If an object occupies only part of a 64-bit register, then there are no guarantees about
-//!   the unused higher-order bits. Codegen routines must be mindful of this and, depending upon
-//!   the operation, may be required to (for example) "mask off" or "sign extend into" the
-//!   undefined bits in order to compute the correct result.
-//!
-//! FIXME: the code generator clobbers registers willy-nilly because at the time of writing we have
-//! a register allocator that doesn't actually use any registers. Later we will have to audit the
-//! backend and insert register save/restore for clobbered registers.
+//! * When a value is in a register, we make no guarantees about what the upper bits are set to.
+//!   You must sign or zero extend at every point where those upper bits matter.
 
 use super::{
     super::{
@@ -757,21 +751,16 @@ impl<'a> Assemble<'a> {
     /// `from_bits` must be between 1 and 64.
     fn sign_extend_to_reg64(&mut self, reg: Rq, from_bits: u8) {
         debug_assert!(from_bits > 0 && from_bits <= 64);
-        // For "regularly-sized" integers, we can use movsx to achieve the sign extend and without
-        // fear of register stalls.
         match from_bits {
+            1 => dynasm!(self.asm
+                ; and Rq(reg.code()), 1
+                ; neg Rq(reg.code())
+            ),
             8 => dynasm!(self.asm; movsx Rq(reg.code()), Rb(reg.code())),
             16 => dynasm!(self.asm; movsx Rq(reg.code()), Rw(reg.code())),
             32 => dynasm!(self.asm; movsx Rq(reg.code()), Rd(reg.code())),
             64 => (), // nothing to do.
-            _ => {
-                // For "oddly-sized" integers we have to do the sign extend ourselves.
-                let shift = REG64_BITSIZE - usize::from(from_bits);
-                dynasm!(self.asm
-                    ; shl Rq(reg.code()), shift as i8 // shift all the way left.
-                    ; sar Rq(reg.code()), shift as i8 // shift back with the correct leading bits.
-                );
-            }
+            x => todo!("{x}"),
         }
     }
 
@@ -781,24 +770,14 @@ impl<'a> Assemble<'a> {
     /// `from_bits` must be between 1 and 64.
     fn zero_extend_to_reg64(&mut self, reg: Rq, from_bits: u8) {
         debug_assert!(from_bits > 0 && from_bits <= 64);
-        // For "regularly-sized" integers, we can use movzx to achieve the zero extend and without
-        // fear of register stalls.
         match from_bits {
-            8 => dynasm!(self.asm; movzx Rq(reg.code()), Rb(reg.code())),
-            16 => dynasm!(self.asm; movzx Rq(reg.code()), Rw(reg.code())),
+            1..=31 => dynasm!(self.asm; and Rq(reg.code()), ((1u64 << from_bits) - 1) as i32),
             32 => {
                 // A mov into a 32-bit register zero-extends up to 64 bits already.
                 dynasm!(self.asm; mov Rd(reg.code()), Rd(reg.code()));
             }
             64 => (), // nothing to do.
-            _ => {
-                // For "oddly-sized" integers we have to do the zero extend ourselves.
-                let shift = REG64_BITSIZE - usize::from(from_bits);
-                dynasm!(self.asm
-                    ; shl Rq(reg.code()), shift as i8 // shift all the way left.
-                    ; shr Rq(reg.code()), shift as i8 // shift back with leading zeros.
- ); - } + x => todo!("{x}"), } } @@ -1314,18 +1293,17 @@ impl<'a> Assemble<'a> { RegConstraint::OutputCanBeSameAsInput(ptr_op), ], ); - let size = self.m.inst(iidx).def_byte_size(self.m); - debug_assert!(size <= REG64_BYTESIZE); - match size { - 1 => { + let bitw = self.m.inst(iidx).def_bitw(self.m); + match bitw { + 8 => { dynasm!(self.asm ; movzx Rq(out_reg.code()), BYTE [Rq(in_reg.code()) + off]) } - 2 => { + 16 => { dynasm!(self.asm ; movzx Rq(out_reg.code()), WORD [Rq(in_reg.code()) + off]) } - 4 => dynasm!(self.asm ; mov Rd(out_reg.code()), [Rq(in_reg.code()) + off]), - 8 => dynasm!(self.asm ; mov Rq(out_reg.code()), [Rq(in_reg.code()) + off]), - _ => todo!("{}", size), + 32 => dynasm!(self.asm ; mov Rd(out_reg.code()), [Rq(in_reg.code()) + off]), + 64 => dynasm!(self.asm ; mov Rq(out_reg.code()), [Rq(in_reg.code()) + off]), + _ => todo!("{bitw}"), }; } Ty::Float(fty) => { @@ -1419,22 +1397,22 @@ impl<'a> Assemble<'a> { let val = inst.val(self.m); match self.m.type_(val.tyidx(self.m)) { Ty::Integer(_) | Ty::Ptr => { - let byte_size = val.byte_size(self.m); + let bitw = val.bitw(self.m); if let Some(imm) = self.op_to_zero_ext_immediate(&val) { let [tgt_reg] = self.ra .assign_gp_regs(&mut self.asm, iidx, [RegConstraint::Input(tgt_op)]); - match (imm, byte_size) { - (Immediate::I8(v), 1) => { + match (imm, bitw) { + (Immediate::I8(v), 8) => { dynasm!(self.asm ; mov BYTE [Rq(tgt_reg.code()) + off], v) } - (Immediate::I16(v), 2) => { + (Immediate::I16(v), 16) => { dynasm!(self.asm ; mov WORD [Rq(tgt_reg.code()) + off], v) } - (Immediate::I32(v), 4) => { + (Immediate::I32(v), 32) => { dynasm!(self.asm ; mov DWORD [Rq(tgt_reg.code()) + off], v) } - (Immediate::I32(v), 8) => { + (Immediate::I32(v), 64) => { dynasm!(self.asm ; mov QWORD [Rq(tgt_reg.code()) + off], v) } _ => todo!(), @@ -1445,11 +1423,17 @@ impl<'a> Assemble<'a> { iidx, [RegConstraint::Input(tgt_op), RegConstraint::Input(val)], ); - match byte_size { - 1 => dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rb(val_reg.code())), - 2 => dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rw(val_reg.code())), - 4 => dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rd(val_reg.code())), - 8 => dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rq(val_reg.code())), + match bitw { + 8 => dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rb(val_reg.code())), + 16 => { + dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rw(val_reg.code())) + } + 32 => { + dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rd(val_reg.code())) + } + 64 => { + dynasm!(self.asm ; mov [Rq(tgt_reg.code()) + off], Rq(val_reg.code())) + } _ => todo!(), } } @@ -1616,9 +1600,10 @@ impl<'a> Assemble<'a> { let _: ([Rq; CALLER_CLOBBER_REGS.len()], [Rx; 16]) = self.ra.assign_regs( &mut self.asm, iidx, - gp_cnstrs.try_into().unwrap(), + gp_cnstrs.clone().try_into().unwrap(), fp_cnstrs, ); + self.clear_bits_for_call(&gp_cnstrs); // rax is considered clobbered, but isn't used to pass an argument, so we can // safely use it for the function pointer. 
dynasm!(self.asm @@ -1631,9 +1616,10 @@ impl<'a> Assemble<'a> { self.ra.assign_regs( &mut self.asm, iidx, - gp_cnstrs.try_into().unwrap(), + gp_cnstrs.clone().try_into().unwrap(), fp_cnstrs, ); + self.clear_bits_for_call(&gp_cnstrs); dynasm!(self.asm ; mov rax, num_float_args ; mov Rq(tmp_reg.code()), QWORD p as i64 @@ -1648,9 +1634,10 @@ impl<'a> Assemble<'a> { self.ra.assign_regs( &mut self.asm, iidx, - gp_cnstrs.try_into().unwrap(), + gp_cnstrs.clone().try_into().unwrap(), fp_cnstrs, ); + self.clear_bits_for_call(&gp_cnstrs[..gp_cnstrs.len() - 1]); if fty.is_vararg() { dynasm!(self.asm; mov rax, num_float_args); // SysV x64 ABI } @@ -1662,6 +1649,29 @@ impl<'a> Assemble<'a> { Ok(()) } + /// When we're about to make a call, we need to clear upper bits in registers. + /// + /// This function clears those bits in a manner that is compatible with the SysV ABI. Note: + /// we're a little bit OTT here, and we could do less work if we become more clever about it. + fn clear_bits_for_call(&mut self, gp_cnstrs: &[RegConstraint]) { + for gp_cnstr in gp_cnstrs { + match gp_cnstr { + RegConstraint::InputIntoReg(op, reg) + | RegConstraint::InputIntoRegAndClobber(op, reg) + | RegConstraint::InputOutputIntoReg(op, reg) => { + self.zero_extend_to_reg64(*reg, u8::try_from(op.bitw(self.m)).unwrap()) + } + RegConstraint::Input(_) | RegConstraint::InputOutput(_) => unreachable!(), + RegConstraint::OutputCanBeSameAsInput(_) => (), + RegConstraint::Output + | RegConstraint::OutputFromReg(_) + | RegConstraint::Clobber(_) + | RegConstraint::Temporary + | RegConstraint::None => (), + } + } + } + /// Return the [VarLocation] an [Operand] relates to. fn op_to_var_location(&self, op: Operand) -> VarLocation { match op { @@ -1850,7 +1860,7 @@ impl<'a> Assemble<'a> { ic_inst.predicate(), ic_inst.rhs(self.m), ); - let bit_size = self.m.type_(lhs.tyidx(self.m)).bit_size().unwrap(); + let bit_size = self.m.type_(lhs.tyidx(self.m)).bitw().unwrap(); let imm = if pred.signed() { self.op_to_sign_ext_i32(&rhs) } else { @@ -1909,7 +1919,7 @@ impl<'a> Assemble<'a> { fn cg_icmp(&mut self, iidx: InstIdx, inst: &jit_ir::ICmpInst) { let (lhs, pred, rhs) = (inst.lhs(self.m), inst.predicate(), inst.rhs(self.m)); - let bit_size = self.m.type_(lhs.tyidx(self.m)).bit_size().unwrap(); + let bit_size = self.m.type_(lhs.tyidx(self.m)).bitw().unwrap(); let imm = if pred.signed() { self.op_to_sign_ext_i32(&rhs) } else { @@ -2335,9 +2345,9 @@ impl<'a> Assemble<'a> { fn cg_zext(&mut self, iidx: InstIdx, zinst: &jit_ir::ZExtInst) { let src_type = self.m.type_(zinst.val(self.m).tyidx(self.m)); - let src_bitsize = src_type.bit_size().unwrap(); + let src_bitsize = src_type.bitw().unwrap(); let dest_type = self.m.type_(zinst.dest_tyidx()); - let dest_bitsize = dest_type.bit_size().unwrap(); + let dest_bitsize = dest_type.bitw().unwrap(); if dest_bitsize <= REG64_BITSIZE { if src_bitsize == 32 || src_bitsize == 64 { @@ -2495,7 +2505,7 @@ impl<'a> Assemble<'a> { debug_assert_eq!( self.m .type_(inst.cond(self.m).tyidx(self.m)) - .bit_size() + .bitw() .unwrap(), 1 ); @@ -3470,27 +3480,21 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i1 = param 6 - %7: i16 = and %0, %1 - %8: i32 = and %2, %3 - %9: i63 = and %4, %5 - %10: i1 = and %6, 1i1 + %4: i1 = param 4 + %5: i16 = and %0, %1 + %6: i32 = and %2, %3 + %7: i1 = and %4, 1i1 + black_box %5 + black_box %6 black_box %7 - black_box %8 - black_box %9 - black_box %10 ", " ... 
- ; %7: i16 = and %0, %1 + ; %5: i16 = and %0, %1 and r.64.a, r.64.b - ; %8: i32 = and %2, %3 + ; %6: i32 = and %2, %3 and r.64.c, r.64.d - ; %9: i63 = and %4, %5 - and r.64.e, r.64.f - ; %10: i1 = and %6, 1i1 + ; %7: i1 = and %4, 1i1 and r.32._, 0x01 ... ", @@ -3505,28 +3509,20 @@ mod tests { entry: %0: i16 = param 0 %1: i32 = param 1 - %2: i63 = param 2 - %3: i16 = ashr %0, 1i16 - %4: i32 = ashr %1, 2i32 - %5: i63 = ashr %2, 3i63 + %2: i16 = ashr %0, 1i16 + %3: i32 = ashr %1, 2i32 + black_box %2 black_box %3 - black_box %4 - black_box %5 ", " ... - ; %3: i16 = ashr %0, 1i16 + ; %2: i16 = ashr %0, 1i16 ... movsx r.64.a, r.16.a sar r.64.a, 0x01 - ; %4: i32 = ashr %1, 2i32 + ; %3: i32 = ashr %1, 2i32 ... sar r.32.c, 0x02 - ; %5: i63 = ashr %2, 3i63 - ... - shl r.64.e, 0x01 - sar r.64.e, 0x01 - sar r.64.e, 0x03 ... ", false, @@ -3540,28 +3536,18 @@ mod tests { entry: %0: i16 = param 0 %1: i32 = param 1 - %2: i63 = param 2 - %3: i16 = lshr %0, 1i16 - %4: i32 = lshr %1, 2i32 - %5: i63 = lshr %2, 3i63 + %2: i16 = lshr %0, 1i16 + %3: i32 = lshr %1, 2i32 + black_box %2 black_box %3 - black_box %4 - black_box %5 ", " ... - ; %3: i16 = lshr %0, 1i16 - ... - movzx r.64.a, r.16.a + ; %2: i16 = lshr %0, 1i16 + and r.64.a, 0xffff shr r.64.a, 0x01 - ; %4: i32 = lshr %1, 2i32 - ... + ; %3: i32 = lshr %1, 2i32 shr r.32.c, 0x02 - ; %5: i63 = lshr %2, 3i63 - ... - shl r.64.e, 0x01 - shr r.64.e, 0x01 - shr r.64.e, 0x03 ... ", false, @@ -3575,27 +3561,22 @@ mod tests { entry: %0: i16 = param 0 %1: i32 = param 1 - %2: i63 = param 2 - %3: i16 = shl %0, 1i16 - %4: i32 = shl %1, 2i32 - %5: i63 = shl %2, 3i63 - %6: i32 = shl %1, %4 + %2: i16 = shl %0, 1i16 + %3: i32 = shl %1, 2i32 + %4: i32 = shl %1, %3 + black_box %2 black_box %3 black_box %4 - black_box %5 - black_box %6 ", " ... - ; %3: i16 = shl %0, 1i16 + ; %2: i16 = shl %0, 1i16 ... shl r.64.a, 0x01 - ; %4: i32 = shl %1, 2i32 + ; %3: i32 = shl %1, 2i32 ... shl r.32.a, 0x02 - ; %5: i63 = shl %2, 3i63 - shl r.64.b, 0x03 - ; %6: i32 = shl %1, %4 + ; %4: i32 = shl %1, %3 shl r.32.b, cl ... ", @@ -3612,26 +3593,19 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i16 = mul %0, %1 - %7: i32 = mul %2, %3 - %8: i63 = mul %4, %5 - black_box %6 - black_box %7 - black_box %8 + %4: i16 = mul %0, %1 + %5: i32 = mul %2, %3 + black_box %4 + black_box %5 ", " ... - ; %6: i16 = mul %0, %1 + ; %4: i16 = mul %0, %1 mov rax, ... mul r.64.a - ; %7: i32 = mul %2, %3 + ; %5: i32 = mul %2, %3 ...... mul r.64.b - ; %8: i63 = mul %4, %5 - ...... - mul r.64.c ... ", false, @@ -3647,27 +3621,21 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i1 = param 6 - %7: i16 = or %0, %1 - %8: i32 = or %2, %3 - %9: i63 = or %4, %5 - %10: i1 = or %6, 1i1 + %4: i1 = param 4 + %5: i16 = or %0, %1 + %6: i32 = or %2, %3 + %7: i1 = or %4, 1i1 + black_box %5 + black_box %6 black_box %7 - black_box %8 - black_box %9 - black_box %10 ", " ... - ; %7: i16 = or %0, %1 + ; %5: i16 = or %0, %1 or r.64.a, r.64.b - ; %8: i32 = or %2, %3 + ; %6: i32 = or %2, %3 or r.64.c, r.64.d - ; %9: i63 = or %4, %5 - or r.64.e, r.64.f - ; %10: i1 = or %6, 1i1 + ; %7: i1 = or %4, 1i1 or r.32._, 0x01 ... 
", @@ -3684,35 +3652,23 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i16 = sdiv %0, %1 - %7: i32 = sdiv %2, %3 - %8: i63 = sdiv %4, %5 - black_box %6 - black_box %7 - black_box %8 + %4: i16 = sdiv %0, %1 + %5: i32 = sdiv %2, %3 + black_box %4 + black_box %5 ", " ... - ; %6: i16 = sdiv %0, %1 + ; %4: i16 = sdiv %0, %1 ... movsx rax, ax movsx r.64.a, r.16.a cqo idiv r.64.a - ; %7: i32 = sdiv %2, %3 + ; %5: i32 = sdiv %2, %3 ... cdq idiv r.32.b - ; %8: i63 = sdiv %4, %5 - ... - shl rax, 0x01 - sar rax, 0x01 - shl r.64.c, 0x01 - sar r.64.c, 0x01 - cqo - idiv r.64.c ... ", false, @@ -3728,35 +3684,23 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i16 = srem %0, %1 - %7: i32 = srem %2, %3 - %8: i63 = srem %4, %5 - black_box %6 - black_box %7 - black_box %8 + %4: i16 = srem %0, %1 + %5: i32 = srem %2, %3 + black_box %4 + black_box %5 ", " ... - ; %6: i16 = srem %0, %1 + ; %4: i16 = srem %0, %1 ... movsx rax, ax movsx r.64.a, r.16.a cqo idiv r.64.a - ; %7: i32 = srem %2, %3 + ; %5: i32 = srem %2, %3 ... cdq idiv r.32.b - ; %8: i63 = srem %4, %5 - ... - shl rax, 0x01 - sar rax, 0x01 - shl r.64.c, 0x01 - sar r.64.c, 0x01 - cqo - idiv r.64.c ... ", false, @@ -3772,32 +3716,22 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i16 = sub %0, %1 - %7: i32 = sub %2, %3 - %8: i63 = sub %4, %5 - %9: i32 = sub 0i32, %7 + %4: i16 = sub %0, %1 + %5: i32 = sub %2, %3 + %6: i32 = sub 0i32, %5 + black_box %4 + black_box %5 black_box %6 - black_box %7 - black_box %8 - black_box %9 ", " ... - ; %6: i16 = sub %0, %1 + ; %4: i16 = sub %0, %1 movsx r.64.a, r.16.a movsx r.64.b, r.16.b sub r.64.a, r.64.b - ; %7: i32 = sub %2, %3 + ; %5: i32 = sub %2, %3 sub r.32.c, r.32.d - ; %8: i63 = sub %4, %5 - shl r.64.e, 0x01 - sar r.64.e, 0x01 - shl r.64.f, 0x01 - sar r.64.f, 0x01 - sub r.64.e, r.64.f - ; %9: i32 = sub 0i32, %7 + ; %6: i32 = sub 0i32, %5 ...... neg r.32.c ... @@ -3815,27 +3749,21 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i1 = param 6 - %7: i16 = xor %0, %1 - %8: i32 = xor %2, %3 - %9: i63 = xor %4, %5 - %10: i1 = xor %6, 1i1 + %4: i1 = param 4 + %5: i16 = xor %0, %1 + %6: i32 = xor %2, %3 + %7: i1 = xor %4, 1i1 + black_box %5 + black_box %6 black_box %7 - black_box %8 - black_box %9 - black_box %10 ", " ... - ; %7: i16 = xor %0, %1 + ; %5: i16 = xor %0, %1 xor r.64.a, r.64.b - ; %8: i32 = xor %2, %3 + ; %6: i32 = xor %2, %3 xor r.64.c, r.64.d - ; %9: i63 = xor %4, %5 - xor r.64.e, r.64.f - ; %10: i1 = xor %6, 1i1 + ; %7: i1 = xor %4, 1i1 xor r.32._, 0x01 ... ", @@ -3852,35 +3780,23 @@ mod tests { %1: i16 = param 1 %2: i32 = param 2 %3: i32 = param 3 - %4: i63 = param 4 - %5: i63 = param 5 - %6: i16 = udiv %0, %1 - %7: i32 = udiv %2, %3 - %8: i63 = udiv %4, %5 - black_box %6 - black_box %7 - black_box %8 + %4: i16 = udiv %0, %1 + %5: i32 = udiv %2, %3 + black_box %4 + black_box %5 ", " ... - ; %6: i16 = udiv %0, %1 - ... - movzx rax, ax - movzx r.64.a, r.16.a + ; %4: i16 = udiv %0, %1 + mov rax, r.64.a + and rax, 0xffff + and r.64.b, 0xffff xor rdx, rdx - div r.64.a - ; %7: i32 = udiv %2, %3 + div r.64.b + ; %5: i32 = udiv %2, %3 ... xor edx, edx div r.32.b - ; %8: i63 = udiv %4, %5 - ... - shl rax, 0x01 - shr rax, 0x01 - shl r.64.c, 0x01 - shr r.64.c, 0x01 - xor rdx, rdx - div r.64.c ... ", false, @@ -3930,6 +3846,9 @@ mod tests { ... 
            mov rdx, r.64.x
            mov rdi, r.64.y
+           mov edx, edx
+           mov esi, esi
+           mov edi, edi
            mov r.64.tgt, 0x{sym_addr:X}
            call r.64.tgt
            ...
            ",
@@ -3944,21 +3863,26 @@ mod tests {
        let sym_addr = symbol_to_ptr("puts").unwrap().addr();
        codegen_and_test(
            "
-             func_decl puts (i8, i16, ptr)
+             func_decl puts(i8, i16, ptr, i1)
             entry:
               %0: i8 = param 0
               %1: i16 = param 1
               %2: ptr = param 2
-              call @puts(%0, %1, %2)
+              %3: i1 = param 3
+              call @puts(%0, %1, %2, %3)
        ",
            &format!(
                "
            ...
-           ; call @puts(%0, %1, %2)
+           ; call @puts(%0, %1, %2, %3)
            ...
-           mov rdx, r.64.x
-           mov rdi, r.64.y
+           mov rcx, r.64.x
+           mov rdx, r.64.y
+           mov rdi, r.64.z
+           and rcx, 0x01
+           and rsi, 0xffff
+           and rdi, 0xff
            mov r.64.tgt, 0x{sym_addr:X}
            call r.64.tgt
            ...
            ",
@@ -4020,8 +3944,8 @@ mod tests {
            ; %3: i1 = eq %0, %0
            ......
            ......
-           movzx r.64.a, r.16.a
-           movzx r.64.b, r.16.b
+           and r.64.a, 0xffff
+           and r.64.b, 0xffff
            cmp r.64.a, r.64.b
            setz r.8._
            ...
@@ -4044,23 +3968,17 @@ mod tests {
          entry:
            %0: i16 = param 0
            %1: i32 = param 1
-           %2: i63 = param 2
-           %3: i32 = sext %0
-           %4: i64 = sext %1
-           %5: i64 = sext %2
+           %2: i32 = sext %0
+           %3: i64 = sext %1
+           black_box %2
            black_box %3
-           black_box %4
-           black_box %5
        ",
            "
            ...
-           ; %3: i32 = sext %0
+           ; %2: i32 = sext %0
            movsx r.64.a, r.16.a
-           ; %4: i64 = sext %1
+           ; %3: i64 = sext %1
            movsxd r.64.b, r.32.b
-           ; %5: i64 = sext %2
-           shl r.64.c, 0x01
-           sar r.64.c, 0x01
            ...
        ",
            false,
@@ -4074,24 +3992,15 @@ mod tests {
          entry:
            %0: i16 = param 0
            %1: i32 = param 1
-           %2: i63 = param 2
-           %3: i32 = zext %0
-           %4: i64 = zext %1
-           %5: i64 = zext %2
+           %2: i32 = zext %0
+           %3: i64 = zext %1
+           black_box %2
            black_box %3
-           black_box %4
-           black_box %5
        ",
            "
            ...
-           ; %3: i32 = zext %0
-           movzx r.64.a, r.16.a
-           ; %4: i64 = zext %1
-           ...
-           ; %5: i64 = zext %2
-           shl r.64.c, 0x01
-           shr r.64.c, 0x01
-           ...
+           ; %2: i32 = zext %0
+           and r.64.a, 0xffff
        ",
            false,
        );
@@ -4227,7 +4136,7 @@ mod tests {
            ...
            ; %1: i1 = eq %0, 3i8
            ...
-           movzx r.64.x, r.8._
+           and r.64.x, 0xff
            cmp r.64.x, 0x03
            setz r.8.x
            ...
@@ -4248,7 +4157,7 @@ mod tests {
            "
            ...
            ; %1: i1 = eq %0, 3i8
-           movzx r.64.x, r.8._
+           and r.64.x, 0xff
            cmp r.64.x, 0x03
            ; guard true, %1, [] ; ...
            jnz 0x...
@@ -4273,15 +4182,15 @@ mod tests {
            "
            ...
            ; %1: i1 = eq %0, 3i8
-           movzx r.64.x, r.8._
+           and r.64.x, 0xff
            cmp r.64.x, 0x03
            setz bl
            ; guard true, %1, [] ; ...
            bt r.64._, 0x00
            jnb 0x...
            ; %3: i8 = sext %1
-           shl rbx, 0x3f
-           sar rbx, 0x3f
+           and r.64.x, 0x01
+           neg r.64.x
            ; black_box %3
            ...
        ",
@@ -4376,32 +4285,6 @@ mod tests {
        );
    }
 
-    #[test]
-    fn cg_srem_i56() {
-        codegen_and_test(
-            "
-          entry:
-            %0: i56 = param 0
-            %1: i56 = param 1
-            %2: i56 = srem %0, %1
-            black_box %2
-        ",
-            "
-        ...
-        ; %2: i56 = srem %0, %1
-        mov rax, r.64.y
-        shl rax, 0x08
-        sar rax, 0x08
-        shl rsi, 0x08
-        sar rsi, 0x08
-        cqo
-        idiv r.64.x
-        ...
-        ",
-            false,
-        );
-    }
-
     #[test]
     fn cg_trunc() {
         codegen_and_test(
diff --git a/ykrt/src/compile/jitc_yk/jit_ir/mod.rs b/ykrt/src/compile/jitc_yk/jit_ir/mod.rs
index 126ad2781..171c2a461 100644
--- a/ykrt/src/compile/jitc_yk/jit_ir/mod.rs
+++ b/ykrt/src/compile/jitc_yk/jit_ir/mod.rs
@@ -1008,7 +1008,7 @@ impl Ty {
     }
 
     /// Returns the size of the type in bits, or `None` if asking the size makes no sense.
-    pub(crate) fn bit_size(&self) -> Option<usize> {
+    pub(crate) fn bitw(&self) -> Option<usize> {
         match self {
             Self::Void => Some(0),
             Self::Integer(bits) => Some(usize::try_from(*bits).unwrap()),
@@ -1168,6 +1168,18 @@ impl Operand {
         }
     }
 
+    /// Returns the size of the operand in bits.
+    ///
+    /// # Panics
+    ///
+    /// Panics if asking for the size makes no sense for this operand.
+ pub(crate) fn bitw(&self, m: &Module) -> usize { + match self { + Self::Var(l) => m.inst_raw(*l).def_bitw(m), + Self::Const(cidx) => m.type_(m.const_(*cidx).tyidx(m)).bitw().unwrap(), + } + } + /// Returns the type index of the operand. pub(crate) fn tyidx(&self, m: &Module) -> TyIdx { match self { @@ -1844,7 +1856,7 @@ impl Inst { Ok(inst) } - /// Returns the size of the local variable that this instruction defines (if any). + /// Returns the size in bytes of the local variable that this instruction defines (if any). /// /// # Panics /// @@ -1863,6 +1875,25 @@ impl Inst { } } + /// Returns the bit width of the local variable that this instruction defines (if any). + /// + /// # Panics + /// + /// Panics if: + /// - The instruction defines no local variable. + /// - The instruction defines an unsized local variable. + pub(crate) fn def_bitw(&self, m: &Module) -> usize { + if let Some(ty) = self.def_type(m) { + if let Some(size) = ty.bitw() { + size + } else { + panic!() + } + } else { + panic!() + } + } + /// Is this instruction "equal" to `other` modulo `Copy` instructions? For example, given: /// /// ```text diff --git a/ykrt/src/compile/jitc_yk/jit_ir/well_formed.rs b/ykrt/src/compile/jitc_yk/jit_ir/well_formed.rs index 251183497..d8ff89c37 100644 --- a/ykrt/src/compile/jitc_yk/jit_ir/well_formed.rs +++ b/ykrt/src/compile/jitc_yk/jit_ir/well_formed.rs @@ -193,7 +193,7 @@ impl Module { self.inst(iidx).display(self, iidx) ); } - let val_bitsize = val_ty.bit_size().unwrap(); + let val_bitsize = val_ty.bitw().unwrap(); let dest_ty = self.type_(x.dest_tyidx()); if !matches!(dest_ty, Ty::Integer(_)) && !matches!(dest_ty, Ty::Ptr) { @@ -202,7 +202,7 @@ impl Module { self.inst(iidx).display(self, iidx) ); } - let dest_bitsize = dest_ty.bit_size().unwrap(); + let dest_bitsize = dest_ty.bitw().unwrap(); // FIXME: strictly this should be >= to be in line with LLVM semantics, but the // way we lower LLVM `ptrtoint` to `zext` means that pointer to integer @@ -228,8 +228,8 @@ impl Module { } // LLVM semantics: "The bit sizes of [source] value and the destination type // must be identical" - let val_bitsize = val_ty.bit_size().unwrap(); - let dest_bitsize = dest_ty.bit_size().unwrap(); + let val_bitsize = val_ty.bitw().unwrap(); + let dest_bitsize = dest_ty.bitw().unwrap(); if val_bitsize != dest_bitsize { panic!( "Instruction at position {iidx} trying to bitcast to a differently-sized type\n {}",
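
A note on the extension tricks this patch settles on, with a standalone model
in plain Rust (this sketch is not part of the patch; `zext64` and `sext64` are
illustrative names): `zero_extend_to_reg64` now masks widths below 32 bits
with `and`, relies on a 32-bit `mov` zero-extending for 32 bits, and does
nothing for 64 bits, while `sign_extend_to_reg64` handles `i1` with an
`and`/`neg` pair.

    // Standalone model of the new helpers; names are illustrative, not from
    // the patch.

    /// Zero-extend the low `from_bits` bits of `v`, as `and Rq, imm32`
    /// (widths 1..=31) or `mov Rd, Rd` (width 32) does on x64.
    fn zext64(v: u64, from_bits: u32) -> u64 {
        assert!((1..=64).contains(&from_bits));
        if from_bits == 64 {
            v
        } else {
            v & ((1u64 << from_bits) - 1)
        }
    }

    /// Sign-extend the low `from_bits` bits of `v`. The 1-bit arm is the
    /// `and`/`neg` trick: 0 stays 0, while 1 negates to -1 (all bits set).
    fn sext64(v: u64, from_bits: u32) -> i64 {
        assert!((1..=64).contains(&from_bits));
        match from_bits {
            1 => (v & 1).wrapping_neg() as i64,
            64 => v as i64,
            // The shl/sar pair the patch deletes for "weird" widths; kept
            // here only to show it agrees with movsx on 8/16/32 bits.
            _ => {
                let shift = 64 - from_bits;
                ((v << shift) as i64) >> shift
            }
        }
    }

    fn main() {
        assert_eq!(zext64(0xdead_beef_0000_abcd, 16), 0xabcd);
        assert_eq!(zext64(u64::MAX, 32), 0xffff_ffff);
        assert_eq!(sext64(1, 1), -1);
        assert_eq!(sext64(0xff80, 16), -128);
    }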
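
On point 3 of the commit message: the SysV x86-64 ABI leaves the upper bits of
a sub-64-bit argument register unspecified, but callees whose signatures carry
extension attributes (e.g. LLVM's `zeroext`) can rely on the caller having
extended, so `clear_bits_for_call` conservatively zero extends every GP
argument. A sketch of the per-width choice it ends up making via
`zero_extend_to_reg64` (the enum and function here are hypothetical, for
illustration only):

    /// Which instruction zeroes the upper bits for a given argument width.
    /// A hypothetical mirror of what `clear_bits_for_call` emits.
    #[derive(Debug, PartialEq)]
    enum ZeroUpper {
        Nothing,      // 64 bits: nothing to clear.
        MovR32,       // 32 bits: `mov r32, r32` zeroes bits 32..=63.
        AndMask(u64), // 1..=31 bits: `and r64, imm32` with a width mask.
    }

    fn zero_upper_for(bitw: u32) -> ZeroUpper {
        match bitw {
            64 => ZeroUpper::Nothing,
            32 => ZeroUpper::MovR32,
            1..=31 => ZeroUpper::AndMask((1u64 << bitw) - 1),
            // Widths 33..=63 hit the helper's todo!() in the patch.
            _ => unimplemented!("width {bitw}"),
        }
    }

    fn main() {
        assert_eq!(zero_upper_for(1), ZeroUpper::AndMask(0x1));
        assert_eq!(zero_upper_for(8), ZeroUpper::AndMask(0xff));
        assert_eq!(zero_upper_for(16), ZeroUpper::AndMask(0xffff));
        assert_eq!(zero_upper_for(32), ZeroUpper::MovR32);
    }

This matches the `and rcx, 0x01` / `and rsi, 0xffff` / `and rdi, 0xff`
sequence the updated `puts(i8, i16, ptr, i1)` test now expects.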
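
Finally, the new `bitw`/`def_bitw` accessors complement the existing byte-size
ones. The `debug_assert!(bitw >= bytew)` in the register allocator holds on
the assumption that a byte width is the bit width rounded up to whole bytes; a
one-line model of that assumed relation (not code from the patch):

    /// Assumed relation: bytes = ceil(bits / 8), so an i1 occupies one byte
    /// and `bitw >= byte_size` holds for every width from 1 to 64.
    fn byte_size(bitw: usize) -> usize {
        bitw.div_ceil(8)
    }

    fn main() {
        assert_eq!(byte_size(1), 1);
        assert_eq!(byte_size(16), 2);
        assert!((1..=64).all(|b| b >= byte_size(b)));
    }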