From 0eddf0cbc0a88121a67239d5899dca8a39c39cd8 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Sun, 18 Dec 2022 00:45:07 +0200
Subject: [PATCH 1/8] Sema: fix condition for non-pointer noalias error

Closes #13987
---
 src/Sema.zig                                               | 4 +++-
 test/cases/compile_errors/noalias_on_non_pointer_param.zig | 6 ++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index ede4eaf0e725..e8ed010a7c8a 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -8820,7 +8820,9 @@ fn analyzeParameter(
         };
         return sema.failWithOwnedErrorMsg(msg);
     }
-    if (!this_generic and is_noalias and !param.ty.isPtrAtRuntime()) {
+    if (!sema.is_generic_instantiation and !this_generic and is_noalias and
+        !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional()))
+    {
         return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
     }
 }
diff --git a/test/cases/compile_errors/noalias_on_non_pointer_param.zig b/test/cases/compile_errors/noalias_on_non_pointer_param.zig
index f637013c93e4..806808820f4c 100644
--- a/test/cases/compile_errors/noalias_on_non_pointer_param.zig
+++ b/test/cases/compile_errors/noalias_on_non_pointer_param.zig
@@ -1,6 +1,12 @@
 fn f(noalias x: i32) void { _ = x; }
 export fn entry() void { f(1234); }

+fn generic(comptime T: type, noalias _: [*]T, noalias _: [*]const T, _: usize) void {}
+comptime { _ = generic; }
+
+fn slice(noalias _: []u8) void {}
+comptime { _ = slice; }
+
 // error
 // backend=stage2
 // target=native
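
For illustration only (the function names below are hypothetical and not taken from the patch or its tests), a minimal sketch of the parameter kinds the revised condition is meant to accept: noalias on a many-item pointer, on a pointer-like optional, and on a pointer parameter of a generic function. A non-pointer value parameter such as the `noalias x: i32` in `f` above still produces the "non-pointer parameter declared noalias" error.

    // Accepted: many-item pointer parameter.
    fn takesManyPtr(noalias ptr: [*]const u8, len: usize) void {
        _ = ptr;
        _ = len;
    }

    // Accepted: an optional single-item pointer is pointer-like at runtime.
    fn takesOptionalPtr(noalias ptr: ?*const u32) void {
        _ = ptr;
    }

    // Accepted: the check does not apply to generic parameters or to
    // generic instantiations.
    fn takesGenericPtr(comptime T: type, noalias ptr: *const T) void {
        _ = ptr;
    }

    comptime {
        _ = takesManyPtr;
        _ = takesOptionalPtr;
        _ = takesGenericPtr;
    }
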
From 2926d95e6a7c4d83eb197f78de40718e97bc1f1b Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Mon, 19 Dec 2022 12:18:55 +0200
Subject: [PATCH 2/8] llvm: handle vectors in packed structs

Closes #13201
---
 src/codegen/llvm.zig      | 6 +++---
 test/behavior/bitcast.zig | 3 ---
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 7af987f4d65e..d96c38ed13d8 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -5966,7 +5966,7 @@ pub const FuncGen = struct {
             const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
             const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
             const elem_llvm_ty = try self.dg.lowerType(field_ty);
-            if (field_ty.zigTypeTag() == .Float) {
+            if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
                 const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
                 const same_size_int = self.context.intType(elem_bits);
                 const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
@@ -5989,7 +5989,7 @@ pub const FuncGen = struct {
             assert(struct_ty.containerLayout() == .Packed);
             const containing_int = struct_llvm_val;
             const elem_llvm_ty = try self.dg.lowerType(field_ty);
-            if (field_ty.zigTypeTag() == .Float) {
+            if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
                 const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
                 const same_size_int = self.context.intType(elem_bits);
                 const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
@@ -9889,7 +9889,7 @@ pub const FuncGen = struct {
             return result_ptr;
         }

-        if (info.pointee_type.zigTypeTag() == .Float) {
+        if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) {
            const same_size_int = self.context.intType(elem_bits);
            const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
            return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 8e0bf4ec32ab..023df6a73ddd 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -358,9 +358,6 @@ test "comptime @bitCast packed struct to int and back" {
         const rt_cast = @bitCast(S, i);
         const ct_cast = comptime @bitCast(S, @as(Int, 0));
         inline for (@typeInfo(S).Struct.fields) |field| {
-            if (@typeInfo(field.type) == .Vector)
-                continue; //TODO: https://github.com/ziglang/zig/issues/13201
-
             try expectEqual(@field(rt_cast, field.name), @field(ct_cast, field.name));
         }
     }

From 22d46e1d7753ea2a9accc180e8613206120739c5 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Mon, 19 Dec 2022 15:07:11 +0200
Subject: [PATCH 3/8] value: use int tag type when querying for tag value

Closes #13757
---
 src/value.zig          | 10 ++++++----
 test/behavior/call.zig | 25 +++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/src/value.zig b/src/value.zig
index 839b3d75801d..96242331f9cc 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1072,11 +1072,13 @@ pub const Value = extern union {
                 .enum_simple => Module.EnumFull.ValueMap{},
                 else => unreachable,
             };
-            break :field_index if (values.entries.len == 0)
+            if (values.entries.len == 0) {
                 // auto-numbered enum
-                @intCast(u32, val.toUnsignedInt(mod.getTarget()))
-            else
-                @intCast(u32, values.getIndexContext(val, .{ .ty = ty, .mod = mod }).?);
+                break :field_index @intCast(u32, val.toUnsignedInt(mod.getTarget()));
+            }
+            var buffer: Type.Payload.Bits = undefined;
+            const int_tag_ty = ty.intTagType(&buffer);
+            break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?);
         },
     };
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 4d8f22d15ff1..a8d0d40751f6 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -344,3 +344,28 @@ test "inline call doesn't re-evaluate non generic struct" {
     try @call(.always_inline, S.foo, ArgTuple{.{ .a = 123, .b = 45 }});
     comptime try @call(.always_inline, S.foo, ArgTuple{.{ .a = 123, .b = 45 }});
 }
+
+test "Enum constructed by @Type passed as generic argument" {
+    const S = struct {
+        const E = std.meta.FieldEnum(struct {
+            prev_pos: bool,
+            pos: bool,
+            vel: bool,
+            damp_vel: bool,
+            acc: bool,
+            rgba: bool,
+            prev_scale: bool,
+            scale: bool,
+            prev_rotation: bool,
+            rotation: bool,
+            angular_vel: bool,
+            alive: bool,
+        });
+        fn foo(comptime a: E, b: u32) !void {
+            try expect(@enumToInt(a) == b);
+        }
+    };
+    inline for (@typeInfo(S.E).Enum.fields) |_, i| {
+        try S.foo(@intToEnum(S.E, i), i);
+    }
+}

From ee334aea801c71cbcc567b1d19be9c04d911beda Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Mon, 19 Dec 2022 16:13:12 +0200
Subject: [PATCH 4/8] value: remove `indexVectorlike`

Vectors can be represented by all the same values as arrays, so this was
never a valid shortcut.
---
 src/Sema.zig  | 220 ++++++++++++++-----------
 src/value.zig | 441 ++++++++++++++++++++++++++++++++++----------------
 2 files changed, 434 insertions(+), 227 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index e8ed010a7c8a..02f6b24e2d7e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -9225,7 +9225,7 @@ fn intCast(
             // If the destination type is signed, then we need to double its
             // range to account for negative values.
const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, target); + const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod); break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty); } else dest_max_val; const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); @@ -11683,9 +11683,11 @@ fn zirShl( if (rhs_ty.zigTypeTag() == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { - if (rhs_val.indexVectorlike(i).compareHetero(.gte, bit_value, target)) { + var elem_value_buf: Value.ElemValueBuffer = undefined; + const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + if (rhs_elem.compareHetero(.gte, bit_value, target)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ - rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod), + rhs_elem.fmtValue(scalar_ty, sema.mod), i, scalar_ty.fmt(sema.mod), }); @@ -11701,9 +11703,11 @@ fn zirShl( if (rhs_ty.zigTypeTag() == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { - if (rhs_val.indexVectorlike(i).compareHetero(.lt, Value.zero, target)) { + var elem_value_buf: Value.ElemValueBuffer = undefined; + const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + if (rhs_elem.compareHetero(.lt, Value.zero, target)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ - rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod), + rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } @@ -11726,7 +11730,7 @@ fn zirShl( const val = switch (air_tag) { .shl_exact => val: { - const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, target); + const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod); if (scalar_ty.zigTypeTag() == .ComptimeInt) { break :val shifted.wrapped_result; } @@ -11737,14 +11741,14 @@ fn zirShl( }, .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt) - try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target) + try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) else - try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, target), + try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod), .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt) - try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target) + try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) else - try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, target), + try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod), else => unreachable, }; @@ -11867,9 +11871,11 @@ fn zirShr( if (rhs_ty.zigTypeTag() == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { - if (rhs_val.indexVectorlike(i).compareHetero(.gte, bit_value, target)) { + var elem_value_buf: Value.ElemValueBuffer = undefined; + const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + if (rhs_elem.compareHetero(.gte, bit_value, target)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ - rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod), + rhs_elem.fmtValue(scalar_ty, sema.mod), i, scalar_ty.fmt(sema.mod), }); @@ -11885,9 +11891,11 @@ fn zirShr( if (rhs_ty.zigTypeTag() == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { - if (rhs_val.indexVectorlike(i).compareHetero(.lt, Value.zero, target)) { + var 
elem_value_buf: Value.ElemValueBuffer = undefined; + const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + if (rhs_elem.compareHetero(.lt, Value.zero, target)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ - rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod), + rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } @@ -11903,12 +11911,12 @@ fn zirShr( } if (air_tag == .shr_exact) { // Detect if any ones would be shifted out. - const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, target); + const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, sema.mod); if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) { return sema.fail(block, src, "exact shift shifted out 1 bits", .{}); } } - const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, target); + const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, sema.mod); return sema.addConstant(lhs_ty, val); } else { break :rs lhs_src; @@ -11992,7 +12000,6 @@ fn zirBitwise( const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; - const target = sema.mod.getTarget(); if (!is_int) { return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); @@ -12004,9 +12011,9 @@ fn zirBitwise( if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| { const result_val = switch (air_tag) { - .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, target), - .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, target), - .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, target), + .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, sema.mod), + .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, sema.mod), + .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, sema.mod), else => unreachable, }; return sema.addConstant(resolved_type, result_val); @@ -12033,7 +12040,6 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const operand = try sema.resolveInst(inst_data.operand); const operand_type = sema.typeOf(operand); const scalar_type = operand_type.scalarType(); - const target = sema.mod.getTarget(); if (scalar_type.zigTypeTag() != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{ @@ -12050,14 +12056,14 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const elems = try sema.arena.alloc(Value, vec_len); for (elems) |*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf); - elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target); + elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod); } return sema.addConstant( operand_type, try Value.Tag.aggregate.create(sema.arena, elems), ); } else { - const result_val = try val.bitwiseNot(operand_type, sema.arena, target); + const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod); return sema.addConstant(operand_type, result_val); } } @@ -12586,8 +12592,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // We handle float negation here to ensure negative zero is represented in the bits. 
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty); - const target = sema.mod.getTarget(); - return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, target)); + return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod)); } try sema.requireRuntimeBlock(block, src, null); return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs); @@ -12679,7 +12684,6 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -12690,7 +12694,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins // If lhs % rhs is 0, it doesn't matter. const lhs_val = maybe_lhs_val orelse unreachable; const rhs_val = maybe_rhs_val orelse unreachable; - const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target) catch unreachable; + const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable; if (!rem.compareAllWithZero(.eq)) { return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod), @@ -12766,7 +12770,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_rhs_val) |rhs_val| { if (is_int) { - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target); + const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); var vector_index: usize = undefined; if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); @@ -12775,7 +12779,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } else { return sema.addConstant( resolved_type, - try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod), ); } } else { @@ -12839,7 +12843,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -12884,24 +12887,24 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { if (is_int) { - const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target); + const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod); if (!(modulus_val.compareAllWithZero(.eq))) { return sema.fail(block, src, "exact division produced remainder", .{}); } - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target); + const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); var vector_index: usize = undefined; if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); } return 
sema.addConstant(resolved_type, res); } else { - const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target); + const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod); if (!(modulus_val.compareAllWithZero(.eq))) { return sema.fail(block, src, "exact division produced remainder", .{}); } return sema.addConstant( resolved_type, - try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs rhs_src; @@ -13004,7 +13007,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13064,12 +13066,12 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (is_int) { return sema.addConstant( resolved_type, - try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, target), + try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod), ); } else { return sema.addConstant( resolved_type, - try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs rhs_src; @@ -13121,7 +13123,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13178,7 +13179,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_rhs_val) |rhs_val| { if (is_int) { - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target); + const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); var vector_index: usize = undefined; if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); @@ -13187,7 +13188,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else { return sema.addConstant( resolved_type, - try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs rhs_src; @@ -13365,7 +13366,6 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13442,7 +13442,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} return sema.addConstant( resolved_type, - try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod), ); } else { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); @@ -13471,7 +13471,11 @@ fn intRem( if (ty.zigTypeTag() == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i)); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -13541,7 +13545,6 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13573,7 +13576,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_lhs_val) |lhs_val| { return sema.addConstant( resolved_type, - try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target), + try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod), ); } break :rs lhs_src; @@ -13597,7 +13600,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_rhs_val) |rhs_val| { return sema.addConstant( resolved_type, - try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod), ); } else break :rs rhs_src; } else break :rs lhs_src; @@ -13644,7 +13647,6 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13700,7 +13702,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_rhs_val) |rhs_val| { return sema.addConstant( resolved_type, - try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod), ); } else break :rs rhs_src; } else break :rs lhs_src; @@ -13739,7 +13741,6 @@ fn zirOverflowArithmetic( const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const mod = sema.mod; - const target = mod.getTarget(); // Note, the types of lhs/rhs (also for shifting)/ptr are already correct as ensured by astgen. 
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); @@ -13839,7 +13840,7 @@ fn zirOverflowArithmetic( break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) }; } - const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, target); + const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod); const overflowed = try sema.addConstant(overflowed_ty, result.overflowed); const wrapped = try sema.addConstant(dest_ty, result.wrapped_result); break :result .{ .overflowed = overflowed, .wrapped = wrapped }; @@ -13866,7 +13867,7 @@ fn zirOverflowArithmetic( break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) }; } - const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, target); + const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, sema.mod); const overflowed = try sema.addConstant(overflowed_ty, result.overflowed); const wrapped = try sema.addConstant(dest_ty, result.wrapped_result); break :result .{ .overflowed = overflowed, .wrapped = wrapped }; @@ -13979,13 +13980,12 @@ fn analyzeArithmetic( try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag); const mod = sema.mod; - const target = mod.getTarget(); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: { switch (zir_tag) { .add => { - // For integers: + // For integers:intAddSat // If either of the operands are zero, then the other operand is // returned, even if it is undefined. // If either of the operands are undefined, it's a compile error @@ -14080,7 +14080,7 @@ fn analyzeArithmetic( const val = if (scalar_tag == .ComptimeInt) try sema.intAdd(lhs_val, rhs_val, resolved_type) else - try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, target); + try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod); return sema.addConstant(resolved_type, val); } else break :rs .{ .src = lhs_src, .air_tag = .add_sat }; @@ -14177,7 +14177,7 @@ fn analyzeArithmetic( const val = if (scalar_tag == .ComptimeInt) try sema.intSub(lhs_val, rhs_val, resolved_type) else - try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, target); + try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod); return sema.addConstant(resolved_type, val); } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat }; @@ -14258,7 +14258,7 @@ fn analyzeArithmetic( } } if (is_int) { - const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, target); + const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod); var vector_index: usize = undefined; if (!(try sema.intFitsInType(product, resolved_type, &vector_index))) { return sema.failWithIntegerOverflow(block, src, resolved_type, product, vector_index); @@ -14267,7 +14267,7 @@ fn analyzeArithmetic( } else { return sema.addConstant( resolved_type, - try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, target), + try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, sema.mod), ); } } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; @@ -14311,7 +14311,7 @@ fn analyzeArithmetic( } return sema.addConstant( resolved_type, - try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, target), + try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, 
sema.mod), ); } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -14353,9 +14353,9 @@ fn analyzeArithmetic( } const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intMul(rhs_val, resolved_type, sema.arena, target) + try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod) else - try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, target); + try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod); return sema.addConstant(resolved_type, val); } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; @@ -17947,7 +17947,7 @@ fn zirUnaryMath( block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, - comptime eval: fn (Value, Type, Allocator, std.Target) Allocator.Error!Value, + comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -17956,7 +17956,6 @@ fn zirUnaryMath( const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); switch (operand_ty.zigTypeTag()) { .ComptimeFloat, .Float => {}, @@ -17983,7 +17982,7 @@ fn zirUnaryMath( const elems = try sema.arena.alloc(Value, vec_len); for (elems) |*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try eval(elem_val, scalar_ty, sema.arena, target); + elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod); } return sema.addConstant( result_ty, @@ -17998,7 +17997,7 @@ fn zirUnaryMath( if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { if (operand_val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try eval(operand_val, operand_ty, sema.arena, target); + const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod); return sema.addConstant(operand_ty, result_val); } @@ -19220,8 +19219,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
_ = try sema.checkIntType(block, operand_src, operand_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { - const target = sema.mod.getTarget(); - const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, target, sema); + const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema); return sema.addConstant(dest_ty, result_val); } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known"); @@ -19547,14 +19545,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!is_vector) { return sema.addConstant( dest_ty, - try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, target), + try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod), ); } var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, operand_ty.vectorLen()); for (elems) |*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, target); + elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod); } return sema.addConstant( dest_ty, @@ -20523,13 +20521,13 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. while (i < vec_len) : (i += 1) { const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf); switch (operation) { - .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, target), - .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, target), - .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, target), + .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod), + .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod), + .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod), .Min => accum = accum.numberMin(elem_val, target), .Max => accum = accum.numberMax(elem_val, target), .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty), - .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, target), + .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod), } } return sema.addConstant(scalar_ty, accum); @@ -20925,10 +20923,10 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Xchg => operand_val, .Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty), .Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty), - .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, target), - .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, target), - .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, target), - .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, target), + .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, sema.mod), + .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, sema.mod), + .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, sema.mod), + .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, sema.mod), .Max => stored_val.numberMax (operand_val, target), .Min => stored_val.numberMin (operand_val, target), // zig fmt: on @@ -21001,8 +20999,6 @@ fn 
zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const mulend1 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend1), mulend1_src); const mulend2 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend2), mulend2_src); - const target = sema.mod.getTarget(); - const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1); const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2); const maybe_addend = try sema.resolveMaybeUndefVal(addend); @@ -21018,7 +21014,7 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (maybe_addend) |addend_val| { if (addend_val.isUndef()) return sema.addConstUndef(ty); - const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, target); + const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod); return sema.addConstant(ty, result_val); } else { break :rs addend_src; @@ -24830,7 +24826,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, target, sema); + const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema); // TODO implement this compile error //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, mod)) { @@ -32263,7 +32259,11 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { if (ty.zigTypeTag() == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.intAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i)); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -32297,7 +32297,11 @@ fn numberAddWrap( if (ty.zigTypeTag() == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.numberAddWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType()); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + scalar.* = try sema.numberAddWrapScalar(lhs_elem, rhs_elem, ty.scalarType()); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -32334,7 +32338,11 @@ fn intSub( if (ty.zigTypeTag() == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.intSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i)); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -32368,7 +32376,11 @@ fn numberSubWrap( if (ty.zigTypeTag() == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.numberSubWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType()); + 
var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + scalar.* = try sema.numberSubWrapScalar(lhs_elem, rhs_elem, ty.scalarType()); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -32405,7 +32417,11 @@ fn floatAdd( if (float_type.zigTypeTag() == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType()); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType()); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -32458,7 +32474,11 @@ fn floatSub( if (float_type.zigTypeTag() == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType()); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType()); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -32512,7 +32532,11 @@ fn intSubWithOverflow( const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - const of_math_result = try sema.intSubWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType()); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); overflowed_data[i] = of_math_result.overflowed; scalar.* = of_math_result.wrapped_result; } @@ -32562,7 +32586,9 @@ fn floatToInt( const elem_ty = float_ty.childType(); const result_data = try sema.arena.alloc(Value, float_ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sema.floatToIntScalar(block, src, val.indexVectorlike(i), elem_ty, int_ty.scalarType()); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(sema.mod, i, &buf); + scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType()); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -32857,7 +32883,11 @@ fn intAddWithOverflow( const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - const of_math_result = try sema.intAddWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType()); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = 
rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); overflowed_data[i] = of_math_result.overflowed; scalar.* = of_math_result.wrapped_result; } @@ -32909,7 +32939,11 @@ fn compareAll( if (ty.zigTypeTag() == .Vector) { var i: usize = 0; while (i < ty.vectorLen()) : (i += 1) { - if (!(try sema.compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType()))) { + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) { return false; } } @@ -32953,7 +32987,11 @@ fn compareVector( assert(ty.zigTypeTag() == .Vector); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - const res_bool = try sema.compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType()); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()); scalar.* = Value.makeBool(res_bool); } return Value.Tag.aggregate.create(sema.arena, result_data); diff --git a/src/value.zig b/src/value.zig index 96242331f9cc..2ef18e619808 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2044,7 +2044,11 @@ pub const Value = extern union { if (ty.zigTypeTag() == .Vector) { var i: usize = 0; while (i < ty.vectorLen()) : (i += 1) { - if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod)) { + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) { return false; } } @@ -2793,27 +2797,6 @@ pub const Value = extern union { }; } - /// Index into a vector-like `Value`. Asserts `index` is a valid index for `val`. - /// Some scalar values are considered vector-like to avoid needing to allocate - /// a new `repeated` each time a constant is used. - pub fn indexVectorlike(val: Value, index: usize) Value { - return switch (val.tag()) { - .aggregate => val.castTag(.aggregate).?.data[index], - - .repeated => val.castTag(.repeated).?.data, - // These values will implicitly be treated as `repeated`. - .zero, - .one, - .bool_false, - .bool_true, - .int_i64, - .int_u64, - => val, - - else => unreachable, - }; - } - /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value { @@ -2889,18 +2872,21 @@ pub const Value = extern union { // to have only one possible value itself. 
.the_only_possible_value => return val, - // pointer to integer casted to pointer of array - .int_u64, .int_i64 => { - assert(index == 0); - return val; - }, - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + // These values will implicitly be treated as `repeated`. + .zero, + .one, + .bool_false, + .bool_true, + .int_i64, + .int_u64, + => return val, + else => unreachable, } } @@ -3172,18 +3158,21 @@ pub const Value = extern union { }; } - pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value { - return intToFloatAdvanced(val, arena, int_ty, float_ty, target, null) catch |err| switch (err) { + pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value { + return intToFloatAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => unreachable, }; } - pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value { + pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { + const target = mod.getTarget(); if (int_ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, int_ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), target, opt_sema); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3289,12 +3278,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intAddSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3333,12 +3327,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intSubSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try 
intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3376,13 +3375,18 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const overflowed_data = try arena.alloc(Value, ty.vectorLen()); const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - const of_math_result = try intMulWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); overflowed_data[i] = of_math_result.overflowed; scalar.* = of_math_result.wrapped_result; } @@ -3435,16 +3439,20 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try numberMulWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return numberMulWrapScalar(lhs, rhs, ty, arena, target); + return numberMulWrapScalar(lhs, rhs, ty, arena, mod); } /// Supports both floats and ints; handles undefined. @@ -3453,19 +3461,19 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); if (ty.zigTypeTag() == .ComptimeInt) { - return intMul(lhs, rhs, ty, arena, target); + return intMul(lhs, rhs, ty, arena, mod); } if (ty.isAnyFloat()) { - return floatMul(lhs, rhs, ty, arena, target); + return floatMul(lhs, rhs, ty, arena, mod); } - const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, target); + const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, mod); return overflow_result.wrapped_result; } @@ -3475,12 +3483,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intMulSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3547,11 +3560,14 @@ pub const Value = extern union { } /// operands must be (vectors of) integers; handles undefined scalars. 
- pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value { + pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try bitwiseNotScalar(val.indexVectorlike(i), ty.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3583,11 +3599,16 @@ pub const Value = extern union { } /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try bitwiseAndScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -3615,37 +3636,46 @@ pub const Value = extern union { } /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value { + pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try bitwiseNandScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return bitwiseNandScalar(lhs, rhs, ty, arena, target); + return bitwiseNandScalar(lhs, rhs, ty, arena, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value { + pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - const anded = try bitwiseAnd(lhs, rhs, ty, arena, target); + const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); const all_ones = if (ty.isSignedInt()) try Value.Tag.int_i64.create(arena, -1) else - try ty.maxInt(arena, target); + try ty.maxInt(arena, mod.getTarget()); - return bitwiseXor(anded, all_ones, ty, arena, target); + return bitwiseXor(anded, all_ones, ty, arena, mod); } /// operands must be (vectors of) integers; handles undefined scalars. 
- pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try bitwiseOrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -3672,11 +3702,16 @@ pub const Value = extern union { } /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try bitwiseXorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -3703,11 +3738,16 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } - pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -3739,11 +3779,16 @@ pub const Value = extern union { return fromBigInt(allocator, result_q.toConst()); } - pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target); } 
return Value.Tag.aggregate.create(allocator, result_data); } @@ -3775,11 +3820,16 @@ pub const Value = extern union { return fromBigInt(allocator, result_q.toConst()); } - pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -3846,11 +3896,16 @@ pub const Value = extern union { }; } - pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floatRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3888,11 +3943,16 @@ pub const Value = extern union { } } - pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floatModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3930,11 +3990,16 @@ pub const Value = extern union { } } - pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, 
i, &rhs_buf); + scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -3962,11 +4027,14 @@ pub const Value = extern union { return fromBigInt(allocator, result_bigint.toConst()); } - pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value { + pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, bits, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -3980,12 +4048,17 @@ pub const Value = extern union { allocator: Allocator, signedness: std.builtin.Signedness, bits: Value, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, @intCast(u16, bits.indexVectorlike(i).toUnsignedInt(target)), target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + var bits_buf: Value.ElemValueBuffer = undefined; + const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -4008,11 +4081,16 @@ pub const Value = extern union { return fromBigInt(allocator, result_bigint.toConst()); } - pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try shlScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -4043,13 +4121,18 @@ pub const Value = extern union { rhs: Value, ty: Type, allocator: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - const of_math_result = try shlWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = 
rhs.elemValueBuffer(mod, i, &rhs_buf); + const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target); overflowed_data[i] = of_math_result.overflowed; scalar.* = of_math_result.wrapped_result; } @@ -4097,12 +4180,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try shlSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4141,16 +4229,20 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { if (ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try shlTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return shlTruncScalar(lhs, rhs, ty, arena, target); + return shlTruncScalar(lhs, rhs, ty, arena, mod); } pub fn shlTruncScalar( @@ -4158,19 +4250,24 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { - const shifted = try lhs.shl(rhs, ty, arena, target); - const int_info = ty.intInfo(target); - const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, target); + const shifted = try lhs.shl(rhs, ty, arena, mod); + const int_info = ty.intInfo(mod.getTarget()); + const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); return truncated; } - pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value { + pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (ty.zigTypeTag() == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try shrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target); } return Value.Tag.aggregate.create(allocator, result_data); } @@ -4212,12 +4309,15 @@ pub const Value = extern union { val: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - 
scalar.* = try floatNegScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4245,12 +4345,17 @@ pub const Value = extern union { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floatDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4299,12 +4404,17 @@ pub const Value = extern union { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floatDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4353,12 +4463,17 @@ pub const Value = extern union { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floatDivTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4407,12 +4522,17 @@ pub const Value = extern union { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floatMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, 
target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4456,11 +4576,14 @@ pub const Value = extern union { } } - pub fn sqrt(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sqrtScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4493,11 +4616,14 @@ pub const Value = extern union { } } - pub fn sin(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try sinScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4530,11 +4656,14 @@ pub const Value = extern union { } } - pub fn cos(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try cosScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4567,11 +4696,14 @@ pub const Value = extern union { } } - pub fn tan(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try tanScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4604,11 +4736,14 @@ pub const Value = extern union { } } - pub fn exp(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try expScalar(val.indexVectorlike(i), 
float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4641,11 +4776,14 @@ pub const Value = extern union { } } - pub fn exp2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try exp2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4678,11 +4816,14 @@ pub const Value = extern union { } } - pub fn log(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try logScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4715,11 +4856,14 @@ pub const Value = extern union { } } - pub fn log2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try log2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4752,11 +4896,14 @@ pub const Value = extern union { } } - pub fn log10(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try log10Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4789,11 +4936,14 @@ pub const Value = extern union { } } - pub fn fabs(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + 
const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try fabsScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4826,11 +4976,14 @@ pub const Value = extern union { } } - pub fn floor(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floorScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4863,11 +5016,14 @@ pub const Value = extern union { } } - pub fn ceil(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try ceilScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4900,11 +5056,14 @@ pub const Value = extern union { } } - pub fn round(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try roundScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4937,11 +5096,14 @@ pub const Value = extern union { } } - pub fn trunc(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try truncScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + var buf: Value.ElemValueBuffer = undefined; + const elem_val = val.elemValueBuffer(mod, i, &buf); + scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4980,16 
+5142,23 @@ pub const Value = extern union { mulend2: Value, addend: Value, arena: Allocator, - target: Target, - ) Allocator.Error!Value { + mod: *Module, + ) !Value { + const target = mod.getTarget(); if (float_type.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data) |*scalar, i| { + var mulend1_buf: Value.ElemValueBuffer = undefined; + const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf); + var mulend2_buf: Value.ElemValueBuffer = undefined; + const mulend2_elem = mulend2.elemValueBuffer(mod, i, &mulend2_buf); + var addend_buf: Value.ElemValueBuffer = undefined; + const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf); scalar.* = try mulAddScalar( float_type.scalarType(), - mulend1.indexVectorlike(i), - mulend2.indexVectorlike(i), - addend.indexVectorlike(i), + mulend1_elem, + mulend2_elem, + addend_elem, arena, target, ); From 0c1d8659c51d9544fb8d5de7481e750149c262ae Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 19 Dec 2022 16:30:09 +0200 Subject: [PATCH 5/8] Sema: print notes and reference traces when using `--debug-compile-errors` --- src/Compilation.zig | 2 +- src/Sema.zig | 21 ++++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 1d0997d20cc6..b385fa5f72ba 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -572,7 +572,7 @@ pub const AllErrors = struct { self.arena.promote(gpa).deinit(); } - fn add( + pub fn add( module: *Module, arena: *std.heap.ArenaAllocator, errors: *std.ArrayList(Message), diff --git a/src/Sema.zig b/src/Sema.zig index 02f6b24e2d7e..f03211210cc3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -113,6 +113,7 @@ const target_util = @import("target.zig"); const Package = @import("Package.zig"); const crash_report = @import("crash_report.zig"); const build_options = @import("build_options"); +const Compilation = @import("Compilation.zig"); pub const default_branch_quota = 1000; pub const default_reference_trace_len = 2; @@ -2191,18 +2192,16 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { @setCold(true); if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) { - const err_path = err_msg.src_loc.file_scope.fullPath(sema.mod.gpa) catch unreachable; - const err_source = err_msg.src_loc.file_scope.getSource(sema.mod.gpa) catch unreachable; if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation; - const err_span = err_msg.src_loc.span(sema.mod.gpa) catch unreachable; - const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main); - std.debug.print("compile error during Sema:\n{s}:{d}:{d}: error: {s}\n{s}\n\n", .{ - err_path, - err_loc.line + 1, - err_loc.column + 1, - err_msg.msg, - err_loc.source_line, - }); + var arena = std.heap.ArenaAllocator.init(sema.gpa); + errdefer arena.deinit(); + var errors = std.ArrayList(Compilation.AllErrors.Message).init(sema.gpa); + defer errors.deinit(); + + Compilation.AllErrors.add(sema.mod, &arena, &errors, err_msg.*) catch unreachable; + + std.debug.print("compile error during Sema:\n", .{}); + Compilation.AllErrors.Message.renderToStdErr(errors.items[0], .no_color); crash_report.compilerPanic("unexpected compile error occurred", null, null); } From 6511afcfe090f26345873e7e8db3ae301f8a18a7 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Mon, 19 Dec 2022 20:44:58 +0200 Subject: [PATCH 6/8] Sema: fix coercion from `[:0]T` to `[*c]T` --- src/Sema.zig | 5 +++-- 
test/behavior/cast.zig | 10 ++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index f03211210cc3..f068018ddb41 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -24615,8 +24615,9 @@ fn coerceExtra( else => break :p, } if (inst_info.size == .Slice) { - if (dest_info.sentinel == null or inst_info.sentinel == null or - !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod)) + assert(dest_info.sentinel == null); + if (inst_info.sentinel == null or + !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 1822922ec218..0b276deba703 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1495,3 +1495,13 @@ test "cast typed undefined to int" { _ = b; } } + +test "implicit cast from [:0]T to [*c]T" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + var a: [:0]const u8 = "foo"; + var b: [*c]const u8 = a; + var c = std.mem.span(b); + try expect(c.len == a.len); + try expect(c.ptr == a.ptr); +} From 6da070c5ac1707f2f5370e2e26124a3e114e25ea Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Tue, 20 Dec 2022 17:32:04 +0200 Subject: [PATCH 7/8] Sema: fix crash with generic function with generic function parameter Closes #12810 --- src/Sema.zig | 11 +++++++++++ test/behavior/call.zig | 12 ++++++++++++ 2 files changed, 23 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index f068018ddb41..9de6945fc562 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7256,6 +7256,7 @@ fn instantiateGenericCall( child_block.error_return_trace_index = error_return_trace_index; const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst) catch |err| { + if (err == error.GenericPoison) return error.GenericPoison; // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. if (sema.owner_func) |owner_func| { @@ -8864,6 +8865,11 @@ fn zirParam( }; switch (err) { error.GenericPoison => { + if (sema.inst_map.get(inst)) |_| { + // A generic function is about to evaluate to another generic function. + // Return an error instead. + return error.GenericPoison; + } // The type is not available until the generic instantiation. // We result the param instruction with a poison value and // insert an anytype parameter. @@ -8880,6 +8886,11 @@ fn zirParam( }; const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) { error.GenericPoison => { + if (sema.inst_map.get(inst)) |_| { + // A generic function is about to evaluate to another generic function. + // Return an error instead. + return error.GenericPoison; + } // The type is not available until the generic instantiation. // We result the param instruction with a poison value and // insert an anytype parameter. 
diff --git a/test/behavior/call.zig b/test/behavior/call.zig index a8d0d40751f6..4addd93227f6 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -369,3 +369,15 @@ test "Enum constructed by @Type passed as generic argument" { try S.foo(@intToEnum(S.E, i), i); } } + +test "generic function with generic function parameter" { + const S = struct { + fn f(comptime a: fn (anytype) anyerror!void, b: anytype) anyerror!void { + try a(b); + } + fn g(a: anytype) anyerror!void { + try expect(a == 123); + } + }; + try S.f(S.g, 123); +} From 9f23702c21645aeddd64bcf203bc8b62a328f75f Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Tue, 20 Dec 2022 18:32:06 +0200 Subject: [PATCH 8/8] llvm: fix C ABI for <=256 bit vectors Closes #13918 --- src/arch/x86_64/abi.zig | 3 ++- test/c_abi/cfuncs.c | 13 +++++++++++++ test/c_abi/main.zig | 17 +++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index 393d4db3d572..54c08e4aa933 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -143,7 +143,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { .integer, .integer, .integer, .integer, .integer, .integer, .integer, .integer, }; - if (has_avx512 and bit_size <= 256) return .{ + const has_avx = target.cpu.features.isEnabled(@enumToInt(std.Target.x86.Feature.avx)); + if (has_avx and bit_size <= 256) return .{ .integer, .integer, .integer, .integer, .none, .none, .none, .none, }; diff --git a/test/c_abi/cfuncs.c b/test/c_abi/cfuncs.c index 20896669fe7a..16851f1c7e64 100644 --- a/test/c_abi/cfuncs.c +++ b/test/c_abi/cfuncs.c @@ -742,6 +742,19 @@ SmallVec c_ret_small_vec(void) { return (SmallVec){3, 4}; } +typedef size_t MediumVec __attribute__((vector_size(4 * sizeof(size_t)))); + +void c_medium_vec(MediumVec vec) { + assert_or_panic(vec[0] == 1); + assert_or_panic(vec[1] == 2); + assert_or_panic(vec[2] == 3); + assert_or_panic(vec[3] == 4); +} + +MediumVec c_ret_medium_vec(void) { + return (MediumVec){5, 6, 7, 8}; +} + typedef size_t BigVec __attribute__((vector_size(8 * sizeof(size_t)))); void c_big_vec(BigVec vec) { diff --git a/test/c_abi/main.zig b/test/c_abi/main.zig index 47f045574444..dcf4cbe46f68 100644 --- a/test/c_abi/main.zig +++ b/test/c_abi/main.zig @@ -801,6 +801,23 @@ test "small simd vector" { try expect(x[1] == 4); } +const MediumVec = @Vector(4, usize); + +extern fn c_medium_vec(MediumVec) void; +extern fn c_ret_medium_vec() MediumVec; + +test "medium simd vector" { + if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest; + + c_medium_vec(.{ 1, 2, 3, 4 }); + + var x = c_ret_medium_vec(); + try expect(x[0] == 5); + try expect(x[1] == 6); + try expect(x[2] == 7); + try expect(x[3] == 8); +} + const BigVec = @Vector(8, usize); extern fn c_big_vec(BigVec) void;
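
For reviewers unfamiliar with the x86-64 SysV detail behind the abi.zig change above: 256-bit vector registers (YMM) are available whenever AVX is enabled, while AVX-512 is only required for the 512-bit ZMM registers, so gating the <=256-bit case on AVX-512 was stricter than the hardware requires. The sketch below only illustrates that relationship; it is not compiler code, and the helper name and signature are made up for this note.

const std = @import("std");

// Hypothetical helper (not part of the compiler): picks the widest vector
// register that can hold `bit_size` bits given the enabled CPU features.
fn vectorRegisterBits(bit_size: u16, has_avx: bool, has_avx512: bool) u16 {
    if (bit_size <= 128) return 128; // XMM, baseline on x86_64
    if (bit_size <= 256 and has_avx) return 256; // YMM needs AVX
    if (bit_size <= 512 and has_avx512) return 512; // ZMM needs AVX-512
    return 0; // too wide for a single register
}

test "256-bit vectors only need AVX" {
    try std.testing.expectEqual(@as(u16, 256), vectorRegisterBits(256, true, false));
    try std.testing.expectEqual(@as(u16, 0), vectorRegisterBits(512, true, false));
}

The new c_medium_vec test above exercises exactly this case: @Vector(4, usize) is 256 bits on 64-bit targets.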