diff --git a/doc/langref.html.in b/doc/langref.html.in index 374fbfcde..8a303640e 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -7673,6 +7673,43 @@ test "@setRuntimeSafety" { {#see_also|@shlExact|@shlWithOverflow#} {#header_close#} + {#header_open|@shuffle#} +
{#syntax#}@shuffle(comptime E: type, a: @Vector(a_len, E), b: @Vector(b_len, E), comptime mask: @Vector(mask_len, i32)) @Vector(mask_len, E){#endsyntax#}
+

+ Constructs a new {#link|vector|Vectors#} by selecting elements from {#syntax#}a{#endsyntax#} and + {#syntax#}b{#endsyntax#} based on {#syntax#}mask{#endsyntax#}. +

+

+ Each element in {#syntax#}mask{#endsyntax#} selects an element from either {#syntax#}a{#endsyntax#} or + {#syntax#}b{#endsyntax#}. Positive numbers select from {#syntax#}a{#endsyntax#}, starting at index 0. + Negative values select from {#syntax#}b{#endsyntax#}, starting at {#syntax#}-1{#endsyntax#} and going down. + It is recommended to use the {#syntax#}~{#endsyntax#} operator for indexes into {#syntax#}b{#endsyntax#} + so that indexes into both vectors can start from {#syntax#}0{#endsyntax#} (i.e. {#syntax#}~i32(0){#endsyntax#} is + {#syntax#}-1{#endsyntax#}). +
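An illustrative sketch in the style of this change's behavior tests (the vector values and test name are arbitrary):

const std = @import("std");
const mem = std.mem;
const expect = std.testing.expect;

test "select from both vectors" {
    const a: @Vector(4, u32) = [4]u32{ 10, 11, 12, 13 };
    const b: @Vector(4, u32) = [4]u32{ 20, 21, 22, 23 };
    // 0 and 2 index into a; ~i32(1) == -2 and ~i32(3) == -4 select b[1] and b[3].
    const mask: @Vector(4, i32) = [4]i32{ 0, ~i32(1), 2, ~i32(3) };
    const res = @shuffle(u32, a, b, mask);
    expect(mem.eql(u32, ([4]u32)(res), [4]u32{ 10, 21, 12, 23 }));
}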

+

+ For each element of {#syntax#}mask{#endsyntax#}, if the mask element or the value it selects from + {#syntax#}a{#endsyntax#} or {#syntax#}b{#endsyntax#} is {#syntax#}undefined{#endsyntax#}, + then the corresponding result element is {#syntax#}undefined{#endsyntax#}. +

+

+ {#syntax#}a_len{#endsyntax#} and {#syntax#}b_len{#endsyntax#} may differ. Out-of-bounds element + indexes in {#syntax#}mask{#endsyntax#} result in compile errors. +
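A sketch of mixing operand lengths (again with arbitrary values); the mask length alone fixes the result length:

const std = @import("std");
const mem = std.mem;
const expect = std.testing.expect;

test "operands of different lengths" {
    const long: @Vector(4, i32) = [4]i32{ 1, 2, 3, 4 };
    const short: @Vector(2, i32) = [2]i32{ 100, 200 };
    // A 4-element mask produces a @Vector(4, i32), even though short has only 2 elements.
    const mask: @Vector(4, i32) = [4]i32{ ~i32(0), 0, ~i32(1), 3 };
    const res = @shuffle(i32, long, short, mask);
    expect(mem.eql(i32, ([4]i32)(res), [4]i32{ 100, 1, 200, 4 }));
}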

+

+ If {#syntax#}a{#endsyntax#} or {#syntax#}b{#endsyntax#} is {#syntax#}undefined{#endsyntax#}, it + is treated as a vector with all elements {#syntax#}undefined{#endsyntax#} and the same length as the other vector. + If both vectors are {#syntax#}undefined{#endsyntax#}, {#syntax#}@shuffle{#endsyntax#} returns + a vector with all elements {#syntax#}undefined{#endsyntax#}. +
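A sketch of passing {#syntax#}undefined{#endsyntax#} for an operand the mask never selects from, mirroring this patch's behavior test (values are arbitrary):

const std = @import("std");
const mem = std.mem;
const expect = std.testing.expect;

test "unused operand may be undefined" {
    const a: @Vector(4, u8) = [4]u8{ 1, 2, 3, 4 };
    // Every mask element is non-negative, so only a is read and b can be undefined.
    const mask: @Vector(4, i32) = [4]i32{ 3, 2, 1, 0 };
    const res = @shuffle(u8, a, undefined, mask);
    expect(mem.eql(u8, ([4]u8)(res), [4]u8{ 4, 3, 2, 1 }));
}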

+

+ {#syntax#}E{#endsyntax#} must be an {#link|integer|Integers#}, {#link|float|Floats#}, + {#link|pointer|Pointers#}, or {#syntax#}bool{#endsyntax#}. The mask may have any vector length, and its + length determines the length of the result. +
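A final sketch (arbitrary values) using {#syntax#}bool{#endsyntax#} elements and an 8-element mask to concatenate two length-4 vectors:

const std = @import("std");
const mem = std.mem;
const expect = std.testing.expect;

test "mask length determines result length" {
    const a: @Vector(4, bool) = [4]bool{ true, false, true, false };
    const b: @Vector(4, bool) = [4]bool{ false, false, true, true };
    // The 8-element mask yields a @Vector(8, bool): a's elements first, then b's.
    const mask: @Vector(8, i32) = [8]i32{ 0, 1, 2, 3, ~i32(0), ~i32(1), ~i32(2), ~i32(3) };
    const res = @shuffle(bool, a, b, mask);
    expect(mem.eql(bool, ([8]bool)(res), [8]bool{ true, false, true, false, false, false, true, true }));
}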

+ {#see_also|SIMD#} + {#header_close#} + {#header_open|@sizeOf#}
{#syntax#}@sizeOf(comptime T: type) comptime_int{#endsyntax#}

diff --git a/src/all_types.hpp b/src/all_types.hpp index 60b292662..deb56cbb4 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1351,7 +1351,7 @@ struct ZigTypeBoundFn { }; struct ZigTypeVector { - // The type must be a pointer, integer, or float + // The type must be a pointer, integer, bool, or float ZigType *elem_type; uint32_t len; }; @@ -1611,6 +1611,7 @@ enum BuiltinFnId { BuiltinFnIdIntToEnum, BuiltinFnIdIntType, BuiltinFnIdVectorType, + BuiltinFnIdShuffle, BuiltinFnIdSetCold, BuiltinFnIdSetRuntimeSafety, BuiltinFnIdSetFloatMode, @@ -2428,6 +2429,7 @@ enum IrInstructionId { IrInstructionIdBoolToInt, IrInstructionIdIntType, IrInstructionIdVectorType, + IrInstructionIdShuffleVector, IrInstructionIdBoolNot, IrInstructionIdMemset, IrInstructionIdMemcpy, @@ -3669,6 +3671,15 @@ struct IrInstructionVectorToArray { IrInstruction *result_loc; }; +struct IrInstructionShuffleVector { + IrInstruction base; + + IrInstruction *scalar_type; + IrInstruction *a; + IrInstruction *b; + IrInstruction *mask; // This is in zig-format, not llvm format +}; + struct IrInstructionAssertZero { IrInstruction base; diff --git a/src/analyze.cpp b/src/analyze.cpp index d5d874501..ac70d5646 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4708,6 +4708,7 @@ ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) { bool is_valid_vector_elem_type(ZigType *elem_type) { return elem_type->id == ZigTypeIdInt || elem_type->id == ZigTypeIdFloat || + elem_type->id == ZigTypeIdBool || get_codegen_ptr_type(elem_type) != nullptr; } @@ -4727,7 +4728,7 @@ ZigType *get_vector_type(CodeGen *g, uint32_t len, ZigType *elem_type) { ZigType *entry = new_type_table_entry(ZigTypeIdVector); if ((len != 0) && type_has_bits(elem_type)) { - // Vectors can only be ints, floats, or pointers. ints and floats have trivially resolvable + // Vectors can only be ints, floats, bools, or pointers. ints (inc. bools) and floats have trivially resolvable // llvm type refs. pointers we will use usize instead. LLVMTypeRef example_vector_llvm_type; if (elem_type->id == ZigTypeIdPointer) { diff --git a/src/codegen.cpp b/src/codegen.cpp index 4799c0a28..7676b3bbd 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4581,6 +4581,36 @@ static LLVMValueRef ir_render_ctz(CodeGen *g, IrExecutable *executable, IrInstru return gen_widen_or_shorten(g, false, int_type, instruction->base.value.type, wrong_size_int); } +static LLVMValueRef ir_render_shuffle_vector(CodeGen *g, IrExecutable *executable, IrInstructionShuffleVector *instruction) { + uint64_t len_a = instruction->a->value.type->data.vector.len; + uint64_t len_mask = instruction->mask->value.type->data.vector.len; + + // LLVM uses integers larger than the length of the first array to + // index into the second array. This was deemed unnecessarily fragile + // when changing code, so Zig uses negative numbers to index the + // second vector. These start at -1 and go down, and are easiest to use + // with the ~ operator. Here we convert between the two formats. + IrInstruction *mask = instruction->mask; + LLVMValueRef *values = allocate(len_mask); + for (uint64_t i = 0; i < len_mask; i++) { + if (mask->value.data.x_array.data.s_none.elements[i].special == ConstValSpecialUndef) { + values[i] = LLVMGetUndef(LLVMInt32Type()); + } else { + int32_t v = bigint_as_signed(&mask->value.data.x_array.data.s_none.elements[i].data.x_bigint); + uint32_t index_val = (v >= 0) ? 
(uint32_t)v : (uint32_t)~v + (uint32_t)len_a; + values[i] = LLVMConstInt(LLVMInt32Type(), index_val, false); + } + } + + LLVMValueRef llvm_mask_value = LLVMConstVector(values, len_mask); + free(values); + + return LLVMBuildShuffleVector(g->builder, + ir_llvm_value(g, instruction->a), + ir_llvm_value(g, instruction->b), + llvm_mask_value, ""); +} + static LLVMValueRef ir_render_pop_count(CodeGen *g, IrExecutable *executable, IrInstructionPopCount *instruction) { ZigType *int_type = instruction->op->value.type; LLVMValueRef fn_val = get_int_builtin_fn(g, int_type, BuiltinFnIdPopCount); @@ -5549,10 +5579,29 @@ static LLVMValueRef ir_render_vector_to_array(CodeGen *g, IrExecutable *executab assert(handle_is_ptr(array_type)); LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); LLVMValueRef vector = ir_llvm_value(g, instruction->vector); - LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, result_loc, - LLVMPointerType(get_llvm_type(g, instruction->vector->value.type), 0), ""); - uint32_t alignment = get_ptr_align(g, instruction->result_loc->value.type); - gen_store_untyped(g, vector, casted_ptr, alignment, false); + + ZigType *elem_type = array_type->data.array.child_type; + bool bitcast_ok = (elem_type->size_in_bits * 8) == elem_type->abi_size; + if (bitcast_ok) { + LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, result_loc, + LLVMPointerType(get_llvm_type(g, instruction->vector->value.type), 0), ""); + uint32_t alignment = get_ptr_align(g, instruction->result_loc->value.type); + gen_store_untyped(g, vector, casted_ptr, alignment, false); + } else { + // If the ABI size of the element type is not evenly divisible by size_in_bits, a simple bitcast + // will not work, and we fall back to extractelement. + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMTypeRef u32_type_ref = LLVMInt32Type(); + LLVMValueRef zero = LLVMConstInt(usize_type_ref, 0, false); + for (uintptr_t i = 0; i < instruction->vector->value.type->data.vector.len; i++) { + LLVMValueRef index_usize = LLVMConstInt(usize_type_ref, i, false); + LLVMValueRef index_u32 = LLVMConstInt(u32_type_ref, i, false); + LLVMValueRef indexes[] = { zero, index_usize }; + LLVMValueRef elem_ptr = LLVMBuildInBoundsGEP(g->builder, result_loc, indexes, 2, ""); + LLVMValueRef elem = LLVMBuildExtractElement(g->builder, vector, index_u32, ""); + LLVMBuildStore(g->builder, elem, elem_ptr); + } + } return result_loc; } @@ -5563,12 +5612,34 @@ static LLVMValueRef ir_render_array_to_vector(CodeGen *g, IrExecutable *executab assert(vector_type->id == ZigTypeIdVector); assert(!handle_is_ptr(vector_type)); LLVMValueRef array_ptr = ir_llvm_value(g, instruction->array); - LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, array_ptr, - LLVMPointerType(get_llvm_type(g, vector_type), 0), ""); - ZigType *array_type = instruction->array->value.type; - assert(array_type->id == ZigTypeIdArray); - uint32_t alignment = get_abi_alignment(g, array_type->data.array.child_type); - return gen_load_untyped(g, casted_ptr, alignment, false, ""); + LLVMTypeRef vector_type_ref = get_llvm_type(g, vector_type); + + ZigType *elem_type = vector_type->data.vector.elem_type; + bool bitcast_ok = (elem_type->size_in_bits * 8) == elem_type->abi_size; + if (bitcast_ok) { + LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, array_ptr, + LLVMPointerType(vector_type_ref, 0), ""); + ZigType *array_type = instruction->array->value.type; + assert(array_type->id == ZigTypeIdArray); + uint32_t alignment = get_abi_alignment(g, 
array_type->data.array.child_type); + return gen_load_untyped(g, casted_ptr, alignment, false, ""); + } else { + // If the ABI size of the element type is not evenly divisible by size_in_bits, a simple bitcast + // will not work, and we fall back to insertelement. + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMTypeRef u32_type_ref = LLVMInt32Type(); + LLVMValueRef zero = LLVMConstInt(usize_type_ref, 0, false); + LLVMValueRef vector = LLVMGetUndef(vector_type_ref); + for (uintptr_t i = 0; i < instruction->base.value.type->data.vector.len; i++) { + LLVMValueRef index_usize = LLVMConstInt(usize_type_ref, i, false); + LLVMValueRef index_u32 = LLVMConstInt(u32_type_ref, i, false); + LLVMValueRef indexes[] = { zero, index_usize }; + LLVMValueRef elem_ptr = LLVMBuildInBoundsGEP(g->builder, array_ptr, indexes, 2, ""); + LLVMValueRef elem = LLVMBuildLoad(g->builder, elem_ptr, ""); + vector = LLVMBuildInsertElement(g->builder, vector, elem, index_u32, ""); + } + return vector; + } } static LLVMValueRef ir_render_assert_zero(CodeGen *g, IrExecutable *executable, @@ -6054,6 +6125,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction); case IrInstructionIdSpillEnd: return ir_render_spill_end(g, executable, (IrInstructionSpillEnd *)instruction); + case IrInstructionIdShuffleVector: + return ir_render_shuffle_vector(g, executable, (IrInstructionShuffleVector *) instruction); } zig_unreachable(); } @@ -7744,6 +7817,7 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdCompileLog, "compileLog", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdIntType, "IntType", 2); // TODO rename to Int create_builtin_fn(g, BuiltinFnIdVectorType, "Vector", 2); + create_builtin_fn(g, BuiltinFnIdShuffle, "shuffle", 4); create_builtin_fn(g, BuiltinFnIdSetCold, "setCold", 1); create_builtin_fn(g, BuiltinFnIdSetRuntimeSafety, "setRuntimeSafety", 1); create_builtin_fn(g, BuiltinFnIdSetFloatMode, "setFloatMode", 1); diff --git a/src/ir.cpp b/src/ir.cpp index ea9039a1b..cbc00f0cf 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -717,6 +717,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionVectorType *) { return IrInstructionIdVectorType; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionShuffleVector *) { + return IrInstructionIdShuffleVector; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionBoolNot *) { return IrInstructionIdBoolNot; } @@ -2277,6 +2281,25 @@ static IrInstruction *ir_build_vector_type(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_shuffle_vector(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *scalar_type, IrInstruction *a, IrInstruction *b, IrInstruction *mask) +{ + IrInstructionShuffleVector *instruction = ir_build_instruction(irb, scope, source_node); + instruction->scalar_type = scalar_type; + instruction->a = a; + instruction->b = b; + instruction->mask = mask; + + if (scalar_type != nullptr) { + ir_ref_instruction(scalar_type, irb->current_basic_block); + } + ir_ref_instruction(a, irb->current_basic_block); + ir_ref_instruction(b, irb->current_basic_block); + ir_ref_instruction(mask, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_bool_not(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { IrInstructionBoolNot *instruction = ir_build_instruction(irb, 
scope, source_node); instruction->value = value; @@ -4936,6 +4959,32 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo IrInstruction *vector_type = ir_build_vector_type(irb, scope, node, arg0_value, arg1_value); return ir_lval_wrap(irb, scope, vector_type, lval, result_loc); } + case BuiltinFnIdShuffle: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + AstNode *arg2_node = node->data.fn_call_expr.params.at(2); + IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope); + if (arg2_value == irb->codegen->invalid_instruction) + return arg2_value; + + AstNode *arg3_node = node->data.fn_call_expr.params.at(3); + IrInstruction *arg3_value = ir_gen_node(irb, arg3_node, scope); + if (arg3_value == irb->codegen->invalid_instruction) + return arg3_value; + + IrInstruction *shuffle_vector = ir_build_shuffle_vector(irb, scope, node, + arg0_value, arg1_value, arg2_value, arg3_value); + return ir_lval_wrap(irb, scope, shuffle_vector, lval, result_loc); + } case BuiltinFnIdMemcpy: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -11000,6 +11049,19 @@ static ZigType *ir_resolve_type(IrAnalyze *ira, IrInstruction *type_value) { return ir_resolve_const_type(ira->codegen, ira->new_irb.exec, type_value->source_node, val); } +static ZigType *ir_resolve_vector_elem_type(IrAnalyze *ira, IrInstruction *elem_type_value) { + ZigType *elem_type = ir_resolve_type(ira, elem_type_value); + if (type_is_invalid(elem_type)) + return ira->codegen->builtin_types.entry_invalid; + if (!is_valid_vector_elem_type(elem_type)) { + ir_add_error(ira, elem_type_value, + buf_sprintf("vector element type must be integer, float, bool, or pointer; '%s' is invalid", + buf_ptr(&elem_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + return elem_type; +} + static ZigType *ir_resolve_int_type(IrAnalyze *ira, IrInstruction *type_value) { ZigType *ty = ir_resolve_type(ira, type_value); if (type_is_invalid(ty)) @@ -13092,6 +13154,59 @@ static bool optional_value_is_null(ConstExprValue *val) { } } +static IrInstruction *ir_evaluate_bin_op_cmp(IrAnalyze *ira, ZigType *resolved_type, + ConstExprValue *op1_val, ConstExprValue *op2_val, IrInstructionBinOp *bin_op_instruction, IrBinOp op_id, + bool one_possible_value) { + if (op1_val->special == ConstValSpecialUndef || + op2_val->special == ConstValSpecialUndef) + return ir_const_undef(ira, &bin_op_instruction->base, resolved_type); + if (resolved_type->id == ZigTypeIdComptimeFloat || resolved_type->id == ZigTypeIdFloat) { + if (float_is_nan(op1_val) || float_is_nan(op2_val)) { + return ir_const_bool(ira, &bin_op_instruction->base, op_id == IrBinOpCmpNotEq); + } + Cmp cmp_result = float_cmp(op1_val, op2_val); + bool answer = resolve_cmp_op_id(op_id, cmp_result); + return ir_const_bool(ira, &bin_op_instruction->base, answer); + } else if (resolved_type->id == ZigTypeIdComptimeInt || resolved_type->id == ZigTypeIdInt) { + Cmp cmp_result = bigint_cmp(&op1_val->data.x_bigint, &op2_val->data.x_bigint); + bool answer = resolve_cmp_op_id(op_id, cmp_result); + return ir_const_bool(ira, &bin_op_instruction->base, answer); + } else if (resolved_type->id == ZigTypeIdPointer 
&& op_id != IrBinOpCmpEq && op_id != IrBinOpCmpNotEq) { + if ((op1_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr || + op1_val->data.x_ptr.special == ConstPtrSpecialNull) && + (op2_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr || + op2_val->data.x_ptr.special == ConstPtrSpecialNull)) + { + uint64_t op1_addr = op1_val->data.x_ptr.special == ConstPtrSpecialNull ? + 0 : op1_val->data.x_ptr.data.hard_coded_addr.addr; + uint64_t op2_addr = op2_val->data.x_ptr.special == ConstPtrSpecialNull ? + 0 : op2_val->data.x_ptr.data.hard_coded_addr.addr; + Cmp cmp_result; + if (op1_addr > op2_addr) { + cmp_result = CmpGT; + } else if (op1_addr < op2_addr) { + cmp_result = CmpLT; + } else { + cmp_result = CmpEQ; + } + bool answer = resolve_cmp_op_id(op_id, cmp_result); + return ir_const_bool(ira, &bin_op_instruction->base, answer); + } + } else { + bool are_equal = one_possible_value || const_values_equal(ira->codegen, op1_val, op2_val); + bool answer; + if (op_id == IrBinOpCmpEq) { + answer = are_equal; + } else if (op_id == IrBinOpCmpNotEq) { + answer = !are_equal; + } else { + zig_unreachable(); + } + return ir_const_bool(ira, &bin_op_instruction->base, answer); + } + zig_unreachable(); +} + // Returns ErrorNotLazy when the value cannot be determined static Error lazy_cmp_zero(AstNode *source_node, ConstExprValue *val, Cmp *result) { Error err; @@ -13477,51 +13592,22 @@ never_mind_just_calculate_it_normally: ConstExprValue *op2_val = one_possible_value ? &casted_op2->value : ir_resolve_const(ira, casted_op2, UndefBad); if (op2_val == nullptr) return ira->codegen->invalid_instruction; + if (resolved_type->id != ZigTypeIdVector) + return ir_evaluate_bin_op_cmp(ira, resolved_type, op1_val, op2_val, bin_op_instruction, op_id, one_possible_value); + IrInstruction *result = ir_const(ira, &bin_op_instruction->base, + get_vector_type(ira->codegen, resolved_type->data.vector.len, ira->codegen->builtin_types.entry_bool)); + result->value.data.x_array.data.s_none.elements = + create_const_vals(resolved_type->data.vector.len); - if (resolved_type->id == ZigTypeIdComptimeFloat || resolved_type->id == ZigTypeIdFloat) { - if (float_is_nan(op1_val) || float_is_nan(op2_val)) { - return ir_const_bool(ira, &bin_op_instruction->base, op_id == IrBinOpCmpNotEq); - } - Cmp cmp_result = float_cmp(op1_val, op2_val); - bool answer = resolve_cmp_op_id(op_id, cmp_result); - return ir_const_bool(ira, &bin_op_instruction->base, answer); - } else if (resolved_type->id == ZigTypeIdComptimeInt || resolved_type->id == ZigTypeIdInt) { - Cmp cmp_result = bigint_cmp(&op1_val->data.x_bigint, &op2_val->data.x_bigint); - bool answer = resolve_cmp_op_id(op_id, cmp_result); - return ir_const_bool(ira, &bin_op_instruction->base, answer); - } else if (resolved_type->id == ZigTypeIdPointer && op_id != IrBinOpCmpEq && op_id != IrBinOpCmpNotEq) { - if ((op1_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr || - op1_val->data.x_ptr.special == ConstPtrSpecialNull) && - (op2_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr || - op2_val->data.x_ptr.special == ConstPtrSpecialNull)) - { - uint64_t op1_addr = op1_val->data.x_ptr.special == ConstPtrSpecialNull ? - 0 : op1_val->data.x_ptr.data.hard_coded_addr.addr; - uint64_t op2_addr = op2_val->data.x_ptr.special == ConstPtrSpecialNull ? 
- 0 : op2_val->data.x_ptr.data.hard_coded_addr.addr; - Cmp cmp_result; - if (op1_addr > op2_addr) { - cmp_result = CmpGT; - } else if (op1_addr < op2_addr) { - cmp_result = CmpLT; - } else { - cmp_result = CmpEQ; - } - bool answer = resolve_cmp_op_id(op_id, cmp_result); - return ir_const_bool(ira, &bin_op_instruction->base, answer); - } - } else { - bool are_equal = one_possible_value || const_values_equal(ira->codegen, op1_val, op2_val); - bool answer; - if (op_id == IrBinOpCmpEq) { - answer = are_equal; - } else if (op_id == IrBinOpCmpNotEq) { - answer = !are_equal; - } else { - zig_unreachable(); - } - return ir_const_bool(ira, &bin_op_instruction->base, answer); + expand_undef_array(ira->codegen, &result->value); + for (size_t i = 0;i < resolved_type->data.vector.len;i++) { + IrInstruction *cur_res = ir_evaluate_bin_op_cmp(ira, resolved_type->data.vector.elem_type, + &op1_val->data.x_array.data.s_none.elements[i], + &op2_val->data.x_array.data.s_none.elements[i], + bin_op_instruction, op_id, one_possible_value); + copy_const_val(&result->value.data.x_array.data.s_none.elements[i], &cur_res->value, false); } + return result; } // some comparisons with unsigned numbers can be evaluated @@ -13564,7 +13650,12 @@ never_mind_just_calculate_it_normally: IrInstruction *result = ir_build_bin_op(&ira->new_irb, bin_op_instruction->base.scope, bin_op_instruction->base.source_node, op_id, casted_op1, casted_op2, bin_op_instruction->safety_check_on); - result->value.type = ira->codegen->builtin_types.entry_bool; + if (resolved_type->id == ZigTypeIdVector) { + result->value.type = get_vector_type(ira->codegen, resolved_type->data.vector.len, + ira->codegen->builtin_types.entry_bool); + } else { + result->value.type = ira->codegen->builtin_types.entry_bool; + } return result; } @@ -22018,22 +22109,214 @@ static IrInstruction *ir_analyze_instruction_vector_type(IrAnalyze *ira, IrInstr if (!ir_resolve_unsigned(ira, instruction->len->child, ira->codegen->builtin_types.entry_u32, &len)) return ira->codegen->invalid_instruction; - ZigType *elem_type = ir_resolve_type(ira, instruction->elem_type->child); + ZigType *elem_type = ir_resolve_vector_elem_type(ira, instruction->elem_type->child); if (type_is_invalid(elem_type)) return ira->codegen->invalid_instruction; - if (!is_valid_vector_elem_type(elem_type)) { - ir_add_error(ira, instruction->elem_type, - buf_sprintf("vector element type must be integer, float, or pointer; '%s' is invalid", - buf_ptr(&elem_type->name))); - return ira->codegen->invalid_instruction; - } - ZigType *vector_type = get_vector_type(ira->codegen, len, elem_type); return ir_const_type(ira, &instruction->base, vector_type); } +static IrInstruction *ir_analyze_shuffle_vector(IrAnalyze *ira, IrInstruction *source_instr, + ZigType *scalar_type, IrInstruction *a, IrInstruction *b, IrInstruction *mask) +{ + ir_assert(source_instr && scalar_type && a && b && mask, source_instr); + ir_assert(is_valid_vector_elem_type(scalar_type), source_instr); + + uint32_t len_mask; + if (mask->value.type->id == ZigTypeIdVector) { + len_mask = mask->value.type->data.vector.len; + } else if (mask->value.type->id == ZigTypeIdArray) { + len_mask = mask->value.type->data.array.len; + } else { + ir_add_error(ira, mask, + buf_sprintf("expected vector or array, found '%s'", + buf_ptr(&mask->value.type->name))); + return ira->codegen->invalid_instruction; + } + mask = ir_implicit_cast(ira, mask, get_vector_type(ira->codegen, len_mask, + ira->codegen->builtin_types.entry_i32)); + if 
(type_is_invalid(mask->value.type)) + return ira->codegen->invalid_instruction; + + uint32_t len_a; + if (a->value.type->id == ZigTypeIdVector) { + len_a = a->value.type->data.vector.len; + } else if (a->value.type->id == ZigTypeIdArray) { + len_a = a->value.type->data.array.len; + } else if (a->value.type->id == ZigTypeIdUndefined) { + len_a = UINT32_MAX; + } else { + ir_add_error(ira, a, + buf_sprintf("expected vector or array with element type '%s', found '%s'", + buf_ptr(&scalar_type->name), + buf_ptr(&a->value.type->name))); + return ira->codegen->invalid_instruction; + } + + uint32_t len_b; + if (b->value.type->id == ZigTypeIdVector) { + len_b = b->value.type->data.vector.len; + } else if (b->value.type->id == ZigTypeIdArray) { + len_b = b->value.type->data.array.len; + } else if (b->value.type->id == ZigTypeIdUndefined) { + len_b = UINT32_MAX; + } else { + ir_add_error(ira, b, + buf_sprintf("expected vector or array with element type '%s', found '%s'", + buf_ptr(&scalar_type->name), + buf_ptr(&b->value.type->name))); + return ira->codegen->invalid_instruction; + } + + if (len_a == UINT32_MAX && len_b == UINT32_MAX) { + return ir_const_undef(ira, a, get_vector_type(ira->codegen, len_mask, scalar_type)); + } + + if (len_a == UINT32_MAX) { + len_a = len_b; + a = ir_const_undef(ira, a, get_vector_type(ira->codegen, len_a, scalar_type)); + } else { + a = ir_implicit_cast(ira, a, get_vector_type(ira->codegen, len_a, scalar_type)); + if (type_is_invalid(a->value.type)) + return ira->codegen->invalid_instruction; + } + + if (len_b == UINT32_MAX) { + len_b = len_a; + b = ir_const_undef(ira, b, get_vector_type(ira->codegen, len_b, scalar_type)); + } else { + b = ir_implicit_cast(ira, b, get_vector_type(ira->codegen, len_b, scalar_type)); + if (type_is_invalid(b->value.type)) + return ira->codegen->invalid_instruction; + } + + ConstExprValue *mask_val = ir_resolve_const(ira, mask, UndefOk); + if (mask_val == nullptr) + return ira->codegen->invalid_instruction; + + expand_undef_array(ira->codegen, mask_val); + + for (uint32_t i = 0; i < len_mask; i += 1) { + ConstExprValue *mask_elem_val = &mask_val->data.x_array.data.s_none.elements[i]; + if (mask_elem_val->special == ConstValSpecialUndef) + continue; + int32_t v_i32 = bigint_as_signed(&mask_elem_val->data.x_bigint); + uint32_t v; + IrInstruction *chosen_operand; + if (v_i32 >= 0) { + v = (uint32_t)v_i32; + chosen_operand = a; + } else { + v = (uint32_t)~v_i32; + chosen_operand = b; + } + if (v >= chosen_operand->value.type->data.vector.len) { + ErrorMsg *msg = ir_add_error(ira, mask, + buf_sprintf("mask index '%u' has out-of-bounds selection", i)); + add_error_note(ira->codegen, msg, chosen_operand->source_node, + buf_sprintf("selected index '%u' out of bounds of %s", v, + buf_ptr(&chosen_operand->value.type->name))); + if (chosen_operand == a && v < len_a + len_b) { + add_error_note(ira->codegen, msg, b->source_node, + buf_create_from_str("selections from the second vector are specified with negative numbers")); + } + return ira->codegen->invalid_instruction; + } + } + + ZigType *result_type = get_vector_type(ira->codegen, len_mask, scalar_type); + if (instr_is_comptime(a) && instr_is_comptime(b)) { + ConstExprValue *a_val = ir_resolve_const(ira, a, UndefOk); + if (a_val == nullptr) + return ira->codegen->invalid_instruction; + + ConstExprValue *b_val = ir_resolve_const(ira, b, UndefOk); + if (b_val == nullptr) + return ira->codegen->invalid_instruction; + + expand_undef_array(ira->codegen, a_val); + expand_undef_array(ira->codegen, b_val); 
+ + IrInstruction *result = ir_const(ira, source_instr, result_type); + result->value.data.x_array.data.s_none.elements = create_const_vals(len_mask); + for (uint32_t i = 0; i < mask_val->type->data.vector.len; i += 1) { + ConstExprValue *mask_elem_val = &mask_val->data.x_array.data.s_none.elements[i]; + ConstExprValue *result_elem_val = &result->value.data.x_array.data.s_none.elements[i]; + if (mask_elem_val->special == ConstValSpecialUndef) { + result_elem_val->special = ConstValSpecialUndef; + continue; + } + int32_t v = bigint_as_signed(&mask_elem_val->data.x_bigint); + // We've already checked for and emitted compile errors for index out of bounds here. + ConstExprValue *src_elem_val = (v >= 0) ? + &a->value.data.x_array.data.s_none.elements[v] : + &b->value.data.x_array.data.s_none.elements[~v]; + copy_const_val(result_elem_val, src_elem_val, false); + + ir_assert(result_elem_val->special == ConstValSpecialStatic, source_instr); + } + result->value.special = ConstValSpecialStatic; + return result; + } + + // All static analysis passed, and not comptime. + // For runtime codegen, vectors a and b must be the same length. Here we + // recursively @shuffle the smaller vector to append undefined elements + // to it up to the length of the longer vector. This recursion terminates + // in 1 call because these calls to ir_analyze_shuffle_vector guarantee + // len_a == len_b. + if (len_a != len_b) { + uint32_t len_min = min(len_a, len_b); + uint32_t len_max = max(len_a, len_b); + + IrInstruction *expand_mask = ir_const(ira, mask, + get_vector_type(ira->codegen, len_max, ira->codegen->builtin_types.entry_i32)); + expand_mask->value.data.x_array.data.s_none.elements = create_const_vals(len_max); + uint32_t i = 0; + for (; i < len_min; i += 1) + bigint_init_unsigned(&expand_mask->value.data.x_array.data.s_none.elements[i].data.x_bigint, i); + for (; i < len_max; i += 1) + bigint_init_signed(&expand_mask->value.data.x_array.data.s_none.elements[i].data.x_bigint, -1); + + IrInstruction *undef = ir_const_undef(ira, source_instr, + get_vector_type(ira->codegen, len_min, scalar_type)); + + if (len_b < len_a) { + b = ir_analyze_shuffle_vector(ira, source_instr, scalar_type, b, undef, expand_mask); + } else { + a = ir_analyze_shuffle_vector(ira, source_instr, scalar_type, a, undef, expand_mask); + } + } + + IrInstruction *result = ir_build_shuffle_vector(&ira->new_irb, + source_instr->scope, source_instr->source_node, + nullptr, a, b, mask); + result->value.type = result_type; + return result; +} + +static IrInstruction *ir_analyze_instruction_shuffle_vector(IrAnalyze *ira, IrInstructionShuffleVector *instruction) { + ZigType *scalar_type = ir_resolve_vector_elem_type(ira, instruction->scalar_type); + if (type_is_invalid(scalar_type)) + return ira->codegen->invalid_instruction; + + IrInstruction *a = instruction->a->child; + if (type_is_invalid(a->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *b = instruction->b->child; + if (type_is_invalid(b->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *mask = instruction->mask->child; + if (type_is_invalid(mask->value.type)) + return ira->codegen->invalid_instruction; + + return ir_analyze_shuffle_vector(ira, &instruction->base, scalar_type, a, b, mask); +} + static IrInstruction *ir_analyze_instruction_bool_not(IrAnalyze *ira, IrInstructionBoolNot *instruction) { IrInstruction *value = instruction->value->child; if (type_is_invalid(value->value.type)) @@ -25578,6 +25861,8 @@ static IrInstruction 
*ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_int_type(ira, (IrInstructionIntType *)instruction); case IrInstructionIdVectorType: return ir_analyze_instruction_vector_type(ira, (IrInstructionVectorType *)instruction); + case IrInstructionIdShuffleVector: + return ir_analyze_instruction_shuffle_vector(ira, (IrInstructionShuffleVector *)instruction); case IrInstructionIdBoolNot: return ir_analyze_instruction_bool_not(ira, (IrInstructionBoolNot *)instruction); case IrInstructionIdMemset: @@ -25913,6 +26198,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdTruncate: case IrInstructionIdIntType: case IrInstructionIdVectorType: + case IrInstructionIdShuffleVector: case IrInstructionIdBoolNot: case IrInstructionIdSliceSrc: case IrInstructionIdMemberCount: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index f2877b46e..8561ed450 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -42,6 +42,8 @@ static const char* ir_instruction_type_str(IrInstruction* instruction) { switch (instruction->id) { case IrInstructionIdInvalid: return "Invalid"; + case IrInstructionIdShuffleVector: + return "Shuffle"; case IrInstructionIdDeclVarSrc: return "DeclVarSrc"; case IrInstructionIdDeclVarGen: @@ -1208,6 +1210,18 @@ static void ir_print_vector_type(IrPrint *irp, IrInstructionVectorType *instruct fprintf(irp->f, ")"); } +static void ir_print_shuffle_vector(IrPrint *irp, IrInstructionShuffleVector *instruction) { + fprintf(irp->f, "@shuffle("); + ir_print_other_instruction(irp, instruction->scalar_type); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->a); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->b); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->mask); + fprintf(irp->f, ")"); +} + static void ir_print_bool_not(IrPrint *irp, IrInstructionBoolNot *instruction) { fprintf(irp->f, "! "); ir_print_other_instruction(irp, instruction->value); @@ -2143,6 +2157,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool case IrInstructionIdVectorType: ir_print_vector_type(irp, (IrInstructionVectorType *)instruction); break; + case IrInstructionIdShuffleVector: + ir_print_shuffle_vector(irp, (IrInstructionShuffleVector *)instruction); + break; case IrInstructionIdBoolNot: ir_print_bool_not(irp, (IrInstructionBoolNot *)instruction); break; diff --git a/std/hash/auto_hash.zig b/std/hash/auto_hash.zig index d34fc2719..8a22788e5 100644 --- a/std/hash/auto_hash.zig +++ b/std/hash/auto_hash.zig @@ -116,7 +116,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void { // Otherwise, hash every element. // TODO remove the copy to an array once field access is done. 
const array: [info.len]info.child = key; - comptime var i: u32 = 0; + comptime var i = 0; inline while (i < info.len) : (i += 1) { hash(hasher, array[i], strat); } @@ -357,10 +357,13 @@ test "testHash union" { test "testHash vector" { const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 }; const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 }; - const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 }; testing.expect(testHash(a) == testHash(a)); testing.expect(testHash(a) != testHash(b)); - testing.expect(testHash(a) != testHash(c)); + + const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 }; + const d: @Vector(4, u31) = [_]u31{ 1, 2, 3, 5 }; + testing.expect(testHash(c) == testHash(c)); + testing.expect(testHash(c) != testHash(d)); } test "testHash error union" { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 6365ca64c..1fe3fc58a 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -6484,6 +6484,19 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "tmp.zig:7:23: error: unable to evaluate constant expression", ); + cases.addTest( + "@shuffle with selected index past first vector length", + \\export fn entry() void { + \\ const v: @Vector(4, u32) = [4]u32{ 10, 11, 12, 13 }; + \\ const x: @Vector(4, u32) = [4]u32{ 14, 15, 16, 17 }; + \\ var z = @shuffle(u32, v, x, [8]i32{ 0, 1, 2, 3, 7, 6, 5, 4 }); + \\} + , + "tmp.zig:4:39: error: mask index '4' has out-of-bounds selection", + "tmp.zig:4:27: note: selected index '7' out of bounds of @Vector(4, u32)", + "tmp.zig:4:30: note: selections from the second vector are specified with negative numbers", + ); + cases.addTest( "nested vectors", \\export fn entry() void { @@ -6491,7 +6504,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ var v: V = undefined; \\} , - "tmp.zig:2:26: error: vector element type must be integer, float, or pointer; '@Vector(4, u8)' is invalid", + "tmp.zig:2:26: error: vector element type must be integer, float, bool, or pointer; '@Vector(4, u8)' is invalid", ); cases.add("compileLog of tagged enum doesn't crash the compiler", diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig index db6cdad3b..e56fc7ba7 100644 --- a/test/stage1/behavior.zig +++ b/test/stage1/behavior.zig @@ -80,6 +80,7 @@ comptime { _ = @import("behavior/pub_enum.zig"); _ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig"); _ = @import("behavior/reflection.zig"); + _ = @import("behavior/shuffle.zig"); _ = @import("behavior/sizeof_and_typeof.zig"); _ = @import("behavior/slice.zig"); _ = @import("behavior/slicetobytes.zig"); diff --git a/test/stage1/behavior/shuffle.zig b/test/stage1/behavior/shuffle.zig new file mode 100644 index 000000000..2029ec582 --- /dev/null +++ b/test/stage1/behavior/shuffle.zig @@ -0,0 +1,57 @@ +const std = @import("std"); +const mem = std.mem; +const expect = std.testing.expect; + +test "@shuffle" { + const S = struct { + fn doTheTest() void { + var v: @Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; + var x: @Vector(4, i32) = [4]i32{ 1, 2147483647, 3, 4 }; + const mask: @Vector(4, i32) = [4]i32{ 0, ~i32(2), 3, ~i32(3) }; + var res = @shuffle(i32, v, x, mask); + expect(mem.eql(i32, ([4]i32)(res), [4]i32{ 2147483647, 3, 40, 4 })); + + // Implicit cast from array (of mask) + res = @shuffle(i32, v, x, [4]i32{ 0, ~i32(2), 3, ~i32(3) }); + expect(mem.eql(i32, ([4]i32)(res), [4]i32{ 2147483647, 3, 40, 4 })); + + // Undefined + const mask2: @Vector(4, i32) = [4]i32{ 3, 1, 2, 0 }; + res = @shuffle(i32, v, undefined, mask2); + expect(mem.eql(i32, ([4]i32)(res), [4]i32{ 40, -2, 
30, 2147483647 })); + + // Upcasting of b + var v2: @Vector(2, i32) = [2]i32{ 2147483647, undefined }; + const mask3: @Vector(4, i32) = [4]i32{ ~i32(0), 2, ~i32(0), 3 }; + res = @shuffle(i32, x, v2, mask3); + expect(mem.eql(i32, ([4]i32)(res), [4]i32{ 2147483647, 3, 2147483647, 4 })); + + // Upcasting of a + var v3: @Vector(2, i32) = [2]i32{ 2147483647, -2 }; + const mask4: @Vector(4, i32) = [4]i32{ 0, ~i32(2), 1, ~i32(3) }; + res = @shuffle(i32, v3, x, mask4); + expect(mem.eql(i32, ([4]i32)(res), [4]i32{ 2147483647, 3, -2, 4 })); + + // bool + { + var x2: @Vector(4, bool) = [4]bool{ false, true, false, true }; + var v4: @Vector(2, bool) = [2]bool{ true, false }; + const mask5: @Vector(4, i32) = [4]i32{ 0, ~i32(1), 1, 2 }; + var res2 = @shuffle(bool, x2, v4, mask5); + expect(mem.eql(bool, ([4]bool)(res2), [4]bool{ false, false, true, false })); + } + + // TODO re-enable when LLVM codegen is fixed + // https://github.com/ziglang/zig/issues/3246 + if (false) { + var x2: @Vector(3, bool) = [3]bool{ false, true, false }; + var v4: @Vector(2, bool) = [2]bool{ true, false }; + const mask5: @Vector(4, i32) = [4]i32{ 0, ~i32(1), 1, 2 }; + var res2 = @shuffle(bool, x2, v4, mask5); + expect(mem.eql(bool, ([4]bool)(res2), [4]bool{ false, false, true, false })); + } + } + }; + S.doTheTest(); + comptime S.doTheTest(); +} diff --git a/test/stage1/behavior/vector.zig b/test/stage1/behavior/vector.zig index 431e3fe27..27277b5e5 100644 --- a/test/stage1/behavior/vector.zig +++ b/test/stage1/behavior/vector.zig @@ -2,6 +2,18 @@ const std = @import("std"); const mem = std.mem; const expect = std.testing.expect; +test "implicit cast vector to array - bool" { + const S = struct { + fn doTheTest() void { + const a: @Vector(4, bool) = [_]bool{ true, false, true, false }; + const result_array: [4]bool = a; + expect(mem.eql(bool, result_array, [4]bool{ true, false, true, false })); + } + }; + S.doTheTest(); + comptime S.doTheTest(); +} + test "vector wrap operators" { const S = struct { fn doTheTest() void { @@ -18,6 +30,23 @@ test "vector wrap operators" { comptime S.doTheTest(); } +test "vector bin compares with mem.eql" { + const S = struct { + fn doTheTest() void { + var v: @Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; + var x: @Vector(4, i32) = [4]i32{ 1, 2147483647, 30, 4 }; + expect(mem.eql(bool, ([4]bool)(v == x), [4]bool{ false, false, true, false})); + expect(mem.eql(bool, ([4]bool)(v != x), [4]bool{ true, true, false, true})); + expect(mem.eql(bool, ([4]bool)(v < x), [4]bool{ false, true, false, false})); + expect(mem.eql(bool, ([4]bool)(v > x), [4]bool{ true, false, false, true})); + expect(mem.eql(bool, ([4]bool)(v <= x), [4]bool{ false, true, true, false})); + expect(mem.eql(bool, ([4]bool)(v >= x), [4]bool{ true, false, true, true})); + } + }; + S.doTheTest(); + comptime S.doTheTest(); +} + test "vector int operators" { const S = struct { fn doTheTest() void { @@ -80,3 +109,32 @@ test "array to vector" { var arr = [4]f32{ foo, 1.5, 0.0, 0.0 }; var vec: @Vector(4, f32) = arr; } + +test "vector casts of sizes not divisable by 8" { + const S = struct { + fn doTheTest() void { + { + var v: @Vector(4, u3) = [4]u3{ 5, 2, 3, 0}; + var x: [4]u3 = v; + expect(mem.eql(u3, x, ([4]u3)(v))); + } + { + var v: @Vector(4, u2) = [4]u2{ 1, 2, 3, 0}; + var x: [4]u2 = v; + expect(mem.eql(u2, x, ([4]u2)(v))); + } + { + var v: @Vector(4, u1) = [4]u1{ 1, 0, 1, 0}; + var x: [4]u1 = v; + expect(mem.eql(u1, x, ([4]u1)(v))); + } + { + var v: @Vector(4, bool) = [4]bool{ false, false, true, false}; + var x: [4]bool = v; + 
expect(mem.eql(bool, x, ([4]bool)(v))); + } + } + }; + S.doTheTest(); + comptime S.doTheTest(); +}