Merge remote-tracking branch 'origin/master' into llvm7

This commit is contained in:
Andrew Kelley 2018-07-16 13:37:16 -04:00
commit 558b0b8791
25 changed files with 486 additions and 130 deletions

View File

@ -2310,11 +2310,11 @@ test "while loop continue expression" {
}
test "while loop continue expression, more complicated" {
var i1: usize = 1;
var j1: usize = 1;
while (i1 * j1 < 2000) : ({ i1 *= 2; j1 *= 3; }) {
const my_ij1 = i1 * j1;
assert(my_ij1 < 2000);
var i: usize = 1;
var j: usize = 1;
while (i * j < 2000) : ({ i *= 2; j *= 3; }) {
const my_ij = i * j;
assert(my_ij < 2000);
}
}
{#code_end#}
@ -5424,7 +5424,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#}
{#header_open|@IntType#}
<pre><code class="zig">@IntType(comptime is_signed: bool, comptime bit_count: u8) type</code></pre>
<pre><code class="zig">@IntType(comptime is_signed: bool, comptime bit_count: u32) type</code></pre>
<p>
This function returns an integer type with the given signedness and bit count.
</p>

View File

@ -8,6 +8,7 @@ const ir = @import("ir.zig");
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const event = std.event;
const assert = std.debug.assert;
pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) !void {
fn_val.base.ref();
@ -35,9 +36,23 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code)
try renderToLlvmModule(&ofile, fn_val, code);
// TODO module level assembly
//if (buf_len(&g->global_asm) != 0) {
// LLVMSetModuleInlineAsm(g->module, buf_ptr(&g->global_asm));
//}
// TODO
//ZigLLVMDIBuilderFinalize(g->dbuilder);
if (comp.verbose_llvm_ir) {
llvm.DumpModule(ofile.module);
}
// verify the llvm module when safety is on
if (std.debug.runtime_safety) {
var error_ptr: ?[*]u8 = null;
_ = llvm.VerifyModule(ofile.module, llvm.AbortProcessAction, &error_ptr);
}
}
pub const ObjectFile = struct {
@ -55,5 +70,146 @@ pub const ObjectFile = struct {
pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) !void {
// TODO audit more of codegen.cpp:fn_llvm_value and port more logic
const llvm_fn_type = try fn_val.base.typeof.getLlvmType(ofile);
const llvm_fn = llvm.AddFunction(ofile.module, fn_val.symbol_name.ptr(), llvm_fn_type);
const llvm_fn = llvm.AddFunction(
ofile.module,
fn_val.symbol_name.ptr(),
llvm_fn_type,
) orelse return error.OutOfMemory;
const want_fn_safety = fn_val.block_scope.safety.get(ofile.comp);
if (want_fn_safety and ofile.comp.haveLibC()) {
try addLLVMFnAttr(ofile, llvm_fn, "sspstrong");
try addLLVMFnAttrStr(ofile, llvm_fn, "stack-protector-buffer-size", "4");
}
// TODO
//if (fn_val.align_stack) |align_stack| {
// try addLLVMFnAttrInt(ofile, llvm_fn, "alignstack", align_stack);
//}
const fn_type = fn_val.base.typeof.cast(Type.Fn).?;
try addLLVMFnAttr(ofile, llvm_fn, "nounwind");
//add_uwtable_attr(g, fn_table_entry->llvm_value);
try addLLVMFnAttr(ofile, llvm_fn, "nobuiltin");
//if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) {
// ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true");
// ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr);
//}
//if (fn_table_entry->section_name) {
// LLVMSetSection(fn_table_entry->llvm_value, buf_ptr(fn_table_entry->section_name));
//}
//if (fn_table_entry->align_bytes > 0) {
// LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes);
//} else {
// // We'd like to set the best alignment for the function here, but on Darwin LLVM gives
// // "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling
// // any of the functions for getting alignment. Not specifying the alignment should
// // use the ABI alignment, which is fine.
//}
//if (!type_has_bits(return_type)) {
// // nothing to do
//} else if (type_is_codegen_pointer(return_type)) {
// addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull");
//} else if (handle_is_ptr(return_type) &&
// calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc))
//{
// addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret");
// addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull");
//}
// TODO set parameter attributes
// TODO
//uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
//if (err_ret_trace_arg_index != UINT32_MAX) {
// addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull");
//}
const cur_ret_ptr = if (fn_type.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null;
// build all basic blocks
for (code.basic_block_list.toSlice()) |bb| {
bb.llvm_block = llvm.AppendBasicBlockInContext(
ofile.context,
llvm_fn,
bb.name_hint,
) orelse return error.OutOfMemory;
}
const entry_bb = code.basic_block_list.at(0);
llvm.PositionBuilderAtEnd(ofile.builder, entry_bb.llvm_block);
llvm.ClearCurrentDebugLocation(ofile.builder);
// TODO set up error return tracing
// TODO allocate temporary stack values
// TODO create debug variable declarations for variables and allocate all local variables
// TODO finishing error return trace setup. we have to do this after all the allocas.
// TODO create debug variable declarations for parameters
for (code.basic_block_list.toSlice()) |current_block| {
llvm.PositionBuilderAtEnd(ofile.builder, current_block.llvm_block);
for (current_block.instruction_list.toSlice()) |instruction| {
if (instruction.ref_count == 0 and !instruction.hasSideEffects()) continue;
instruction.llvm_value = try instruction.render(ofile, fn_val);
}
current_block.llvm_exit_block = llvm.GetInsertBlock(ofile.builder);
}
}
/// Attaches a value-less enum attribute (e.g. "nounwind") to `val` at the
/// given attribute index. The attribute name must be one LLVM recognizes;
/// an unknown name is a programming error and is asserted against.
fn addLLVMAttr(
    ofile: *ObjectFile,
    val: llvm.ValueRef,
    attr_index: llvm.AttributeIndex,
    attr_name: []const u8,
) !void {
    // A kind id of 0 means LLVM does not know this attribute name.
    const enum_kind = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len);
    assert(enum_kind != 0);
    const attr = llvm.CreateEnumAttribute(ofile.context, enum_kind, 0) orelse
        return error.OutOfMemory;
    llvm.AddAttributeAtIndex(val, attr_index, attr);
}
/// Attaches a string ("key"="value") attribute, such as
/// "stack-protector-buffer-size"="4", to `val` at the given attribute index.
fn addLLVMAttrStr(
    ofile: *ObjectFile,
    val: llvm.ValueRef,
    attr_index: llvm.AttributeIndex,
    attr_name: []const u8,
    attr_val: []const u8,
) !void {
    // The C API takes c_uint lengths; slice lengths are usize.
    const name_len = @intCast(c_uint, attr_name.len);
    const val_len = @intCast(c_uint, attr_val.len);
    const str_attr = llvm.CreateStringAttribute(
        ofile.context,
        attr_name.ptr,
        name_len,
        attr_val.ptr,
        val_len,
    ) orelse return error.OutOfMemory;
    llvm.AddAttributeAtIndex(val, attr_index, str_attr);
}
/// Attaches an enum attribute that carries an integer payload (e.g.
/// "alignstack") to `val` at the given attribute index.
/// `attr_name` must be an attribute name LLVM recognizes; this is asserted.
///
/// Fix: the body referenced `ofile.context` but `ofile` was never declared
/// as a parameter, and the only caller (addLLVMFnAttrInt) already passes
/// `ofile` first — add the missing parameter, consistent with the sibling
/// helpers addLLVMAttr and addLLVMAttrStr.
fn addLLVMAttrInt(
    ofile: *ObjectFile,
    val: llvm.ValueRef,
    attr_index: llvm.AttributeIndex,
    attr_name: []const u8,
    attr_val: u64,
) !void {
    // A kind id of 0 means LLVM does not know this attribute name.
    const kind_id = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len);
    assert(kind_id != 0);
    const llvm_attr = llvm.CreateEnumAttribute(ofile.context, kind_id, attr_val) orelse return error.OutOfMemory;
    llvm.AddAttributeAtIndex(val, attr_index, llvm_attr);
}
/// Adds a value-less enum attribute to the function itself.
/// The all-ones attribute index addresses the function rather than a
/// parameter or the return value (LLVM's function attribute index).
fn addLLVMFnAttr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8) !void {
return addLLVMAttr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name);
}
/// Adds a string ("key"="value") attribute to the function itself.
/// The all-ones attribute index addresses the function rather than a
/// parameter or the return value (LLVM's function attribute index).
fn addLLVMFnAttrStr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: []const u8) !void {
return addLLVMAttrStr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val);
}
/// Adds an integer-valued enum attribute to the function itself.
/// The all-ones attribute index addresses the function rather than a
/// parameter or the return value (LLVM's function attribute index).
fn addLLVMFnAttrInt(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: u64) !void {
return addLLVMAttrInt(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val);
}

View File

@ -606,6 +606,10 @@ pub const Compilation = struct {
return error.Todo;
}
/// Returns true if libc is among the link libraries of this compilation.
pub fn haveLibC(self: *Compilation) bool {
return self.libc_link_lib != null;
}
pub fn addLinkLib(self: *Compilation, name: []const u8, provided_explicitly: bool) !*LinkLib {
const is_libc = mem.eql(u8, name, "c");
@ -741,7 +745,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
analyzed_code.dump();
}
// Kick off rendering to LLVM comp, but it doesn't block the fn decl
// Kick off rendering to LLVM module, but it doesn't block the fn decl
// analysis from being complete.
try comp.build_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code);
}

View File

@ -10,6 +10,8 @@ const assert = std.debug.assert;
const Token = std.zig.Token;
const ParsedFile = @import("parsed_file.zig").ParsedFile;
const Span = @import("errmsg.zig").Span;
const llvm = @import("llvm.zig");
const ObjectFile = @import("codegen.zig").ObjectFile;
pub const LVal = enum {
None,
@ -61,6 +63,9 @@ pub const Instruction = struct {
/// the instruction that this one derives from in analysis
parent: ?*Instruction,
/// populated during codegen
llvm_value: ?llvm.ValueRef,
pub fn cast(base: *Instruction, comptime T: type) ?*T {
if (base.id == comptime typeToId(T)) {
return @fieldParentPtr(T, "base", base);
@ -108,14 +113,25 @@ pub const Instruction = struct {
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
const T = @field(Instruction, @memberName(Id, i));
const new_inst = try @fieldParentPtr(T, "base", base).analyze(ira);
new_inst.linkToParent(base);
return new_inst;
return @fieldParentPtr(T, "base", base).analyze(ira);
}
}
unreachable;
}
/// Dispatches codegen for this instruction to the concrete instruction
/// type's render method. Returns the produced LLVM value, or null when the
/// instruction yields no value.
/// Only Return and Const are implemented so far; the remaining kinds panic
/// with TODO.
pub fn render(base: *Instruction, ofile: *ObjectFile, fn_val: *Value.Fn) (error{OutOfMemory}!?llvm.ValueRef) {
switch (base.id) {
Id.Return => return @fieldParentPtr(Return, "base", base).render(ofile, fn_val),
Id.Const => return @fieldParentPtr(Const, "base", base).render(ofile, fn_val),
Id.Ref => @panic("TODO"),
Id.DeclVar => @panic("TODO"),
Id.CheckVoidStmt => @panic("TODO"),
Id.Phi => @panic("TODO"),
Id.Br => @panic("TODO"),
Id.AddImplicitReturnType => @panic("TODO"),
}
}
fn getAsParam(param: *Instruction) !*Instruction {
const child = param.child orelse return error.SemanticAnalysisFailed;
switch (child.val) {
@ -186,6 +202,10 @@ pub const Instruction = struct {
new_inst.val = IrVal{ .KnownValue = self.base.val.KnownValue.getRef() };
return new_inst;
}
/// Renders this comptime-known constant as an LLVM constant value.
/// `fn_val` is unused: a constant does not depend on the enclosing function.
pub fn render(self: *Const, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
return self.base.val.KnownValue.getLlvmConst(ofile);
}
};
pub const Return = struct {
@ -214,6 +234,18 @@ pub const Instruction = struct {
return ira.irb.build(Return, self.base.scope, self.base.span, Params{ .return_value = casted_value });
}
/// Emits the LLVM `ret` terminator for this return instruction.
/// Returning values that are handled by pointer (aggregates) is not yet
/// implemented. Always returns null: a terminator produces no usable value.
pub fn render(self: *Return, ofile: *ObjectFile, fn_val: *Value.Fn) ?llvm.ValueRef {
const value = self.params.return_value.llvm_value;
const return_type = self.params.return_value.getKnownType();
if (return_type.handleIsPtr()) {
@panic("TODO");
} else {
// Result value of BuildRet (the ret instruction itself) is not needed.
_ = llvm.BuildRet(ofile.builder, value);
}
return null;
}
};
pub const Ref = struct {
@ -387,12 +419,16 @@ pub const Variable = struct {
pub const BasicBlock = struct {
ref_count: usize,
name_hint: []const u8,
name_hint: [*]const u8, // must be a C string literal
debug_id: usize,
scope: *Scope,
instruction_list: std.ArrayList(*Instruction),
ref_instruction: ?*Instruction,
/// for codegen
llvm_block: llvm.BasicBlockRef,
llvm_exit_block: llvm.BasicBlockRef,
/// the basic block that is derived from this one in analysis
child: ?*BasicBlock,
@ -426,7 +462,7 @@ pub const Code = struct {
pub fn dump(self: *Code) void {
var bb_i: usize = 0;
for (self.basic_block_list.toSliceConst()) |bb| {
std.debug.warn("{}_{}:\n", bb.name_hint, bb.debug_id);
std.debug.warn("{s}_{}:\n", bb.name_hint, bb.debug_id);
for (bb.instruction_list.toSliceConst()) |instr| {
std.debug.warn(" ");
instr.dump();
@ -475,7 +511,7 @@ pub const Builder = struct {
}
/// No need to clean up resources thanks to the arena allocator.
pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: []const u8) !*BasicBlock {
pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: [*]const u8) !*BasicBlock {
const basic_block = try self.arena().create(BasicBlock{
.ref_count = 0,
.name_hint = name_hint,
@ -485,6 +521,8 @@ pub const Builder = struct {
.child = null,
.parent = null,
.ref_instruction = null,
.llvm_block = undefined,
.llvm_exit_block = undefined,
});
self.next_debug_id += 1;
return basic_block;
@ -600,7 +638,7 @@ pub const Builder = struct {
if (block.label) |label| {
block_scope.incoming_values = std.ArrayList(*Instruction).init(irb.arena());
block_scope.incoming_blocks = std.ArrayList(*BasicBlock).init(irb.arena());
block_scope.end_block = try irb.createBasicBlock(parent_scope, "BlockEnd");
block_scope.end_block = try irb.createBasicBlock(parent_scope, c"BlockEnd");
block_scope.is_comptime = try irb.buildConstBool(
parent_scope,
Span.token(block.lbrace),
@ -777,6 +815,7 @@ pub const Builder = struct {
.span = span,
.child = null,
.parent = null,
.llvm_value = undefined,
},
.params = params,
});
@ -968,7 +1007,7 @@ pub async fn gen(
var irb = try Builder.init(comp, parsed_file);
errdefer irb.abort();
const entry_block = try irb.createBasicBlock(scope, "Entry");
const entry_block = try irb.createBasicBlock(scope, c"Entry");
entry_block.ref(); // Entry block gets a reference because we enter it to begin.
try irb.setCursorAtEndAndAppendBlock(entry_block);
@ -1013,6 +1052,7 @@ pub async fn analyze(comp: *Compilation, parsed_file: *ParsedFile, old_code: *Co
}
const return_inst = try old_instruction.analyze(&ira);
return_inst.linkToParent(old_instruction);
// Note: if we ever modify the above to handle error.CompileError by continuing analysis,
// then here we want to check if ira.isCompTime() and return early if true

View File

@ -2,29 +2,91 @@ const builtin = @import("builtin");
const c = @import("c.zig");
const assert = @import("std").debug.assert;
pub const AttributeIndex = c_uint;
pub const Bool = c_int;
pub const BuilderRef = removeNullability(c.LLVMBuilderRef);
pub const ContextRef = removeNullability(c.LLVMContextRef);
pub const ModuleRef = removeNullability(c.LLVMModuleRef);
pub const ValueRef = removeNullability(c.LLVMValueRef);
pub const TypeRef = removeNullability(c.LLVMTypeRef);
pub const BasicBlockRef = removeNullability(c.LLVMBasicBlockRef);
pub const AttributeRef = removeNullability(c.LLVMAttributeRef);
pub const AddAttributeAtIndex = c.LLVMAddAttributeAtIndex;
pub const AddFunction = c.LLVMAddFunction;
pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation;
pub const ConstInt = c.LLVMConstInt;
pub const ConstStringInContext = c.LLVMConstStringInContext;
pub const ConstStructInContext = c.LLVMConstStructInContext;
pub const CreateBuilderInContext = c.LLVMCreateBuilderInContext;
pub const CreateEnumAttribute = c.LLVMCreateEnumAttribute;
pub const CreateStringAttribute = c.LLVMCreateStringAttribute;
pub const DisposeBuilder = c.LLVMDisposeBuilder;
pub const DisposeModule = c.LLVMDisposeModule;
pub const DoubleTypeInContext = c.LLVMDoubleTypeInContext;
pub const DumpModule = c.LLVMDumpModule;
pub const FP128TypeInContext = c.LLVMFP128TypeInContext;
pub const FloatTypeInContext = c.LLVMFloatTypeInContext;
pub const GetEnumAttributeKindForName = c.LLVMGetEnumAttributeKindForName;
pub const GetMDKindIDInContext = c.LLVMGetMDKindIDInContext;
pub const HalfTypeInContext = c.LLVMHalfTypeInContext;
pub const InsertBasicBlockInContext = c.LLVMInsertBasicBlockInContext;
pub const Int128TypeInContext = c.LLVMInt128TypeInContext;
pub const Int16TypeInContext = c.LLVMInt16TypeInContext;
pub const Int1TypeInContext = c.LLVMInt1TypeInContext;
pub const Int32TypeInContext = c.LLVMInt32TypeInContext;
pub const Int64TypeInContext = c.LLVMInt64TypeInContext;
pub const Int8TypeInContext = c.LLVMInt8TypeInContext;
pub const IntPtrTypeForASInContext = c.LLVMIntPtrTypeForASInContext;
pub const IntPtrTypeInContext = c.LLVMIntPtrTypeInContext;
pub const IntTypeInContext = c.LLVMIntTypeInContext;
pub const LabelTypeInContext = c.LLVMLabelTypeInContext;
pub const MDNodeInContext = c.LLVMMDNodeInContext;
pub const MDStringInContext = c.LLVMMDStringInContext;
pub const MetadataTypeInContext = c.LLVMMetadataTypeInContext;
pub const ModuleCreateWithNameInContext = c.LLVMModuleCreateWithNameInContext;
pub const PPCFP128TypeInContext = c.LLVMPPCFP128TypeInContext;
pub const StructTypeInContext = c.LLVMStructTypeInContext;
pub const TokenTypeInContext = c.LLVMTokenTypeInContext;
pub const VoidTypeInContext = c.LLVMVoidTypeInContext;
pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext;
pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext;
pub const ConstAllOnes = c.LLVMConstAllOnes;
pub const ConstNull = c.LLVMConstNull;
pub const VerifyModule = LLVMVerifyModule;
extern fn LLVMVerifyModule(M: ModuleRef, Action: VerifierFailureAction, OutMessage: *?[*]u8) Bool;
pub const GetInsertBlock = LLVMGetInsertBlock;
extern fn LLVMGetInsertBlock(Builder: BuilderRef) BasicBlockRef;
pub const FunctionType = LLVMFunctionType;
extern fn LLVMFunctionType(
ReturnType: TypeRef,
ParamTypes: [*]TypeRef,
ParamCount: c_uint,
IsVarArg: c_int,
IsVarArg: Bool,
) ?TypeRef;
pub const GetParam = LLVMGetParam;
extern fn LLVMGetParam(Fn: ValueRef, Index: c_uint) ValueRef;
pub const AppendBasicBlockInContext = LLVMAppendBasicBlockInContext;
extern fn LLVMAppendBasicBlockInContext(C: ContextRef, Fn: ValueRef, Name: [*]const u8) ?BasicBlockRef;
pub const PositionBuilderAtEnd = LLVMPositionBuilderAtEnd;
extern fn LLVMPositionBuilderAtEnd(Builder: BuilderRef, Block: BasicBlockRef) void;
pub const AbortProcessAction = VerifierFailureAction.LLVMAbortProcessAction;
pub const PrintMessageAction = VerifierFailureAction.LLVMPrintMessageAction;
pub const ReturnStatusAction = VerifierFailureAction.LLVMReturnStatusAction;
pub const VerifierFailureAction = c.LLVMVerifierFailureAction;
fn removeNullability(comptime T: type) type {
comptime assert(@typeId(T) == builtin.TypeId.Optional);
return T.Child;
}
pub const BuildRet = LLVMBuildRet;
extern fn LLVMBuildRet(arg0: BuilderRef, V: ?ValueRef) ValueRef;

View File

@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const Allocator = mem.Allocator;
const Decl = @import("decl.zig").Decl;
const Compilation = @import("compilation.zig").Compilation;
@ -6,6 +7,7 @@ const mem = std.mem;
const ast = std.zig.ast;
const Value = @import("value.zig").Value;
const ir = @import("ir.zig");
const Span = @import("errmsg.zig").Span;
pub const Scope = struct {
id: Id,
@ -93,6 +95,35 @@ pub const Scope = struct {
end_block: *ir.BasicBlock,
is_comptime: *ir.Instruction,
safety: Safety,
/// Whether runtime safety checks are enabled for a block scope.
/// Auto derives the answer from the build mode; Manual records an explicit
/// per-scope override (NOTE(review): presumably set by @setRuntimeSafety —
/// the setter is not visible in this chunk, confirm at the use site).
const Safety = union(enum) {
Auto,
Manual: Manual,
const Manual = struct {
/// the source span that disabled the safety value
span: Span,
/// whether safety is enabled
enabled: bool,
};
/// Resolves the effective safety setting: Auto is true in Debug and
/// ReleaseSafe and false in ReleaseFast and ReleaseSmall; Manual uses the
/// recorded override.
fn get(self: Safety, comp: *Compilation) bool {
return switch (self) {
Safety.Auto => switch (comp.build_mode) {
builtin.Mode.Debug,
builtin.Mode.ReleaseSafe,
=> true,
builtin.Mode.ReleaseFast,
builtin.Mode.ReleaseSmall,
=> false,
},
@TagType(Safety).Manual => |man| man.enabled,
};
}
};
/// Creates a Block scope with 1 reference
pub fn create(comp: *Compilation, parent: ?*Scope) !*Block {
const self = try comp.a().create(Block{
@ -105,6 +136,7 @@ pub const Scope = struct {
.incoming_blocks = undefined,
.end_block = undefined,
.is_comptime = undefined,
.safety = Safety.Auto,
});
errdefer comp.a().destroy(self);

View File

@ -72,6 +72,81 @@ pub const Type = struct {
}
}
/// Whether values of this type are manipulated through a pointer ("handle")
/// during codegen rather than carried as a direct LLVM value (used e.g. by
/// Return.render and for the sret first parameter).
/// Aggregate types are not implemented yet and panic with TODO.
pub fn handleIsPtr(base: *Type) bool {
switch (base.id) {
// comptime-only types never exist at runtime, so they cannot reach codegen
Id.Type,
Id.ComptimeFloat,
Id.ComptimeInt,
Id.Undefined,
Id.Null,
Id.Namespace,
Id.Block,
Id.BoundFn,
Id.ArgTuple,
Id.Opaque,
=> unreachable,
// scalar-like types are passed as direct values
Id.NoReturn,
Id.Void,
Id.Bool,
Id.Int,
Id.Float,
Id.Pointer,
Id.ErrorSet,
Id.Enum,
Id.Fn,
Id.Promise,
=> return false,
Id.Struct => @panic("TODO"),
Id.Array => @panic("TODO"),
Id.Optional => @panic("TODO"),
Id.ErrorUnion => @panic("TODO"),
Id.Union => @panic("TODO"),
}
}
/// Whether a value of this type occupies any bits at runtime.
/// Void and NoReturn are zero-bit; several type kinds are not implemented
/// yet and panic with TODO.
pub fn hasBits(base: *Type) bool {
switch (base.id) {
// comptime-only types never exist at runtime, so they cannot reach codegen
Id.Type,
Id.ComptimeFloat,
Id.ComptimeInt,
Id.Undefined,
Id.Null,
Id.Namespace,
Id.Block,
Id.BoundFn,
Id.ArgTuple,
Id.Opaque,
=> unreachable,
Id.Void,
Id.NoReturn,
=> return false,
Id.Bool,
Id.Int,
Id.Float,
Id.Fn,
Id.Promise,
=> return true,
Id.ErrorSet => @panic("TODO"),
Id.Enum => @panic("TODO"),
Id.Pointer => @panic("TODO"),
Id.Struct => @panic("TODO"),
Id.Array => @panic("TODO"),
Id.Optional => @panic("TODO"),
Id.ErrorUnion => @panic("TODO"),
Id.Union => @panic("TODO"),
}
}
/// Downcasts this base Type to the concrete subtype T.
/// Returns null when the type's id tag does not match T's name.
pub fn cast(base: *Type, comptime T: type) ?*T {
    if (base.id == @field(Id, @typeName(T))) {
        return @fieldParentPtr(T, "base", base);
    }
    return null;
}
/// Debugging aid: prints this type's tag name to stderr.
pub fn dump(base: *const Type) void {
std.debug.warn("{}", @tagName(base.id));
}

View File

@ -2,6 +2,8 @@ const std = @import("std");
const builtin = @import("builtin");
const Scope = @import("scope.zig").Scope;
const Compilation = @import("compilation.zig").Compilation;
const ObjectFile = @import("codegen.zig").ObjectFile;
const llvm = @import("llvm.zig");
/// Values are ref-counted, heap-allocated, and copy-on-write
/// If there is only 1 ref then write need not copy
@ -39,6 +41,17 @@ pub const Value = struct {
std.debug.warn("{}", @tagName(base.id));
}
/// Lowers a comptime-known Value to an LLVM constant during codegen.
/// Returns null for values with no runtime representation (Void).
/// Type and NoReturn values cannot appear here, hence unreachable; Fn and
/// Ptr are not implemented yet.
pub fn getLlvmConst(base: *Value, ofile: *ObjectFile) (error{OutOfMemory}!?llvm.ValueRef) {
switch (base.id) {
Id.Type => unreachable,
Id.Fn => @panic("TODO"),
Id.Void => return null,
Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmConst(ofile),
Id.NoReturn => unreachable,
Id.Ptr => @panic("TODO"),
}
}
pub const Id = enum {
Type,
Fn,
@ -123,6 +136,15 @@ pub const Value = struct {
/// Frees this Bool value using the compilation's allocator.
pub fn destroy(self: *Bool, comp: *Compilation) void {
comp.a().destroy(self);
}
/// Lowers this boolean to an LLVM i1 constant:
/// all-ones (1) for true, zero for false.
pub fn getLlvmConst(self: *Bool, ofile: *ObjectFile) ?llvm.ValueRef {
    const i1_type = llvm.Int1TypeInContext(ofile.context);
    return if (self.x) llvm.ConstAllOnes(i1_type) else llvm.ConstNull(i1_type);
}
};
pub const NoReturn = struct {

View File

@ -1587,7 +1587,6 @@ struct CodeGen {
struct {
TypeTableEntry *entry_bool;
TypeTableEntry *entry_int[2][12]; // [signed,unsigned][2,3,4,5,6,7,8,16,29,32,64,128]
TypeTableEntry *entry_c_int[CIntTypeCount];
TypeTableEntry *entry_c_longdouble;
TypeTableEntry *entry_c_void;
@ -1596,12 +1595,9 @@ struct CodeGen {
TypeTableEntry *entry_u32;
TypeTableEntry *entry_u29;
TypeTableEntry *entry_u64;
TypeTableEntry *entry_u128;
TypeTableEntry *entry_i8;
TypeTableEntry *entry_i16;
TypeTableEntry *entry_i32;
TypeTableEntry *entry_i64;
TypeTableEntry *entry_i128;
TypeTableEntry *entry_isize;
TypeTableEntry *entry_usize;
TypeTableEntry *entry_f16;

View File

@ -3227,9 +3227,8 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) {
}
{
auto entry = g->primitive_type_table.maybe_get(tld->name);
if (entry) {
TypeTableEntry *type = entry->value;
TypeTableEntry *type = get_primitive_type(g, tld->name);
if (type != nullptr) {
add_node_error(g, tld->source_node,
buf_sprintf("declaration shadows type '%s'", buf_ptr(&type->name)));
}
@ -3474,9 +3473,8 @@ VariableTableEntry *add_variable(CodeGen *g, AstNode *source_node, Scope *parent
add_error_note(g, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
variable_entry->value->type = g->builtin_types.entry_invalid;
} else {
auto primitive_table_entry = g->primitive_type_table.maybe_get(name);
if (primitive_table_entry) {
TypeTableEntry *type = primitive_table_entry->value;
TypeTableEntry *type = get_primitive_type(g, name);
if (type != nullptr) {
add_node_error(g, source_node,
buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name)));
variable_entry->value->type = g->builtin_types.entry_invalid;
@ -4307,43 +4305,7 @@ void semantic_analyze(CodeGen *g) {
}
}
TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
size_t index;
if (size_in_bits == 2) {
index = 0;
} else if (size_in_bits == 3) {
index = 1;
} else if (size_in_bits == 4) {
index = 2;
} else if (size_in_bits == 5) {
index = 3;
} else if (size_in_bits == 6) {
index = 4;
} else if (size_in_bits == 7) {
index = 5;
} else if (size_in_bits == 8) {
index = 6;
} else if (size_in_bits == 16) {
index = 7;
} else if (size_in_bits == 29) {
index = 8;
} else if (size_in_bits == 32) {
index = 9;
} else if (size_in_bits == 64) {
index = 10;
} else if (size_in_bits == 128) {
index = 11;
} else {
return nullptr;
}
return &g->builtin_types.entry_int[is_signed ? 0 : 1][index];
}
TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
TypeTableEntry **common_entry = get_int_type_ptr(g, is_signed, size_in_bits);
if (common_entry)
return *common_entry;
TypeId type_id = {};
type_id.id = TypeTableEntryIdInt;
type_id.data.integer.is_signed = is_signed;
@ -4953,6 +4915,8 @@ bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type) {
while (scope) {
if (scope->id == ScopeIdVarDecl) {
ScopeVarDecl *var_scope = (ScopeVarDecl *)scope;
if (type_is_invalid(var_scope->var->value->type))
return false;
if (can_mutate_comptime_var_state(var_scope->var->value))
return false;
} else if (scope->id == ScopeIdFnDef) {
@ -6310,3 +6274,28 @@ bool type_can_fail(TypeTableEntry *type_entry) {
bool fn_type_can_fail(FnTypeId *fn_type_id) {
return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync;
}
// Resolves a primitive type by name.
// Names of the form i<digits> / u<digits> (at least one digit) are built as
// arbitrary-width integer types; every other name is looked up in the
// primitive type table. Returns nullptr when the name is not a primitive.
TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name) {
    if (buf_len(name) >= 2) {
        uint8_t first_c = buf_ptr(name)[0];
        if (first_c == 'i' || first_c == 'u') {
            bool rest_is_digits = true;
            for (size_t i = 1; i < buf_len(name); i += 1) {
                uint8_t c = buf_ptr(name)[i];
                if (c < '0' || c > '9') {
                    rest_is_digits = false;
                    break;
                }
            }
            if (rest_is_digits) {
                bool is_signed = (first_c == 'i');
                uint32_t bit_count = atoi(buf_ptr(name) + 1);
                return get_int_type(g, is_signed, bit_count);
            }
        }
    }
    auto primitive_table_entry = g->primitive_type_table.maybe_get(name);
    return (primitive_table_entry != nullptr) ? primitive_table_entry->value : nullptr;
}

View File

@ -19,7 +19,6 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count);
uint64_t type_size(CodeGen *g, TypeTableEntry *type_entry);
uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry);
TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits);
TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits);
TypeTableEntry **get_c_int_type_ptr(CodeGen *g, CIntType c_int_type);
TypeTableEntry *get_c_int_type(CodeGen *g, CIntType c_int_type);
@ -204,4 +203,6 @@ bool type_can_fail(TypeTableEntry *type_entry);
bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type);
AstNode *type_decl_node(TypeTableEntry *type_entry);
TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name);
#endif

View File

@ -5973,21 +5973,6 @@ static void do_code_gen(CodeGen *g) {
}
}
static const uint8_t int_sizes_in_bits[] = {
2,
3,
4,
5,
6,
7,
8,
16,
29,
32,
64,
128,
};
struct CIntTypeInfo {
CIntType id;
const char *name;
@ -6072,16 +6057,6 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_arg_tuple = entry;
}
for (size_t int_size_i = 0; int_size_i < array_length(int_sizes_in_bits); int_size_i += 1) {
uint8_t size_in_bits = int_sizes_in_bits[int_size_i];
for (size_t is_sign_i = 0; is_sign_i < array_length(is_signed_list); is_sign_i += 1) {
bool is_signed = is_signed_list[is_sign_i];
TypeTableEntry *entry = make_int_type(g, is_signed, size_in_bits);
g->primitive_type_table.put(&entry->name, entry);
get_int_type_ptr(g, is_signed, size_in_bits)[0] = entry;
}
}
for (size_t i = 0; i < array_length(c_int_type_infos); i += 1) {
const CIntTypeInfo *info = &c_int_type_infos[i];
uint32_t size_in_bits = target_c_type_size_in_bits(&g->zig_target, info->id);
@ -6197,12 +6172,9 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_u29 = get_int_type(g, false, 29);
g->builtin_types.entry_u32 = get_int_type(g, false, 32);
g->builtin_types.entry_u64 = get_int_type(g, false, 64);
g->builtin_types.entry_u128 = get_int_type(g, false, 128);
g->builtin_types.entry_i8 = get_int_type(g, true, 8);
g->builtin_types.entry_i16 = get_int_type(g, true, 16);
g->builtin_types.entry_i32 = get_int_type(g, true, 32);
g->builtin_types.entry_i64 = get_int_type(g, true, 64);
g->builtin_types.entry_i128 = get_int_type(g, true, 128);
{
g->builtin_types.entry_c_void = get_opaque_type(g, nullptr, nullptr, "c_void");

View File

@ -3217,9 +3217,8 @@ static VariableTableEntry *create_local_var(CodeGen *codegen, AstNode *node, Sco
add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
variable_entry->value->type = codegen->builtin_types.entry_invalid;
} else {
auto primitive_table_entry = codegen->primitive_type_table.maybe_get(name);
if (primitive_table_entry) {
TypeTableEntry *type = primitive_table_entry->value;
TypeTableEntry *type = get_primitive_type(codegen, name);
if (type != nullptr) {
add_node_error(codegen, node,
buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name)));
variable_entry->value->type = codegen->builtin_types.entry_invalid;
@ -3661,9 +3660,9 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
return &const_instruction->base;
}
auto primitive_table_entry = irb->codegen->primitive_type_table.maybe_get(variable_name);
if (primitive_table_entry) {
IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_table_entry->value);
TypeTableEntry *primitive_type = get_primitive_type(irb->codegen, variable_name);
if (primitive_type != nullptr) {
IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_type);
if (lval == LValPtr) {
return ir_build_ref(irb, scope, node, value, false, false);
} else {
@ -10691,11 +10690,11 @@ static bool ir_resolve_align(IrAnalyze *ira, IrInstruction *value, uint32_t *out
return true;
}
static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) {
static bool ir_resolve_unsigned(IrAnalyze *ira, IrInstruction *value, TypeTableEntry *int_type, uint64_t *out) {
if (type_is_invalid(value->value.type))
return false;
IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->codegen->builtin_types.entry_usize);
IrInstruction *casted_value = ir_implicit_cast(ira, value, int_type);
if (type_is_invalid(casted_value->value.type))
return false;
@ -10707,6 +10706,10 @@ static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out
return true;
}
static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) {
return ir_resolve_unsigned(ira, value, ira->codegen->builtin_types.entry_usize, out);
}
static bool ir_resolve_bool(IrAnalyze *ira, IrInstruction *value, bool *out) {
if (type_is_invalid(value->value.type))
return false;
@ -18025,7 +18028,7 @@ static TypeTableEntry *ir_analyze_instruction_int_type(IrAnalyze *ira, IrInstruc
IrInstruction *bit_count_value = instruction->bit_count->other;
uint64_t bit_count;
if (!ir_resolve_usize(ira, bit_count_value, &bit_count))
if (!ir_resolve_unsigned(ira, bit_count_value, ira->codegen->builtin_types.entry_u32, &bit_count))
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);

View File

@ -427,7 +427,7 @@ static AstNode *get_global(Context *c, Buf *name) {
if (entry)
return entry->value;
}
if (c->codegen->primitive_type_table.maybe_get(name) != nullptr) {
if (get_primitive_type(c->codegen, name) != nullptr) {
return trans_create_node_symbol(c, name);
}
return nullptr;

View File

@ -5,8 +5,6 @@ const Allocator = mem.Allocator;
const assert = debug.assert;
const ArrayList = std.ArrayList;
const fmt = std.fmt;
/// A buffer that allocates memory and maintains a null byte at the end.
pub const Buffer = struct {
list: ArrayList(u8),

View File

@ -4,8 +4,6 @@ const endian = @import("../endian.zig");
const debug = @import("../debug/index.zig");
const builtin = @import("builtin");
pub const u160 = @IntType(false, 160);
const RoundParam = struct {
a: usize,
b: usize,

View File

@ -40,6 +40,16 @@ pub fn Future(comptime T: type) type {
return &self.data;
}
/// Non-blocking access to the future's data: returns a pointer to the data
/// if it has already been made available via `resolve`, otherwise returns
/// null without waiting.
pub fn getOrNull(self: *Self) ?*T {
    const resolved = @atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1;
    return if (resolved) &self.data else null;
}
/// Make the data become available. May be called only once.
/// Before calling this, modify the `data` property.
pub fn resolve(self: *Self) void {

View File

@ -6,9 +6,6 @@ const std = @import("index.zig");
const debug = std.debug;
const mem = std.mem;
const u1 = @IntType(false, 1);
const u256 = @IntType(false, 256);
// A single token slice into the parent string.
//
// Use `token.slice()` on the input at the current position to get the current slice.

View File

@ -996,7 +996,6 @@ pub const Int = struct {
// They will still run on larger than this and should pass, but the multi-limb code-paths
// may be untested in some cases.
const u256 = @IntType(false, 256);
const al = debug.global_allocator;
test "big.int comptime_int set" {

View File

@ -75,18 +75,18 @@ fn exp2_32(x: f32) f32 {
}
var uf = x + redux;
var i0 = @bitCast(u32, uf);
i0 += tblsiz / 2;
var i_0 = @bitCast(u32, uf);
i_0 += tblsiz / 2;
const k = i0 / tblsiz;
const k = i_0 / tblsiz;
// NOTE: musl relies on undefined overflow shift behaviour. Appears that this produces the
// intended result but should confirm how GCC/Clang handle this to ensure.
const uk = @bitCast(f64, u64(0x3FF + k) << 52);
i0 &= tblsiz - 1;
i_0 &= tblsiz - 1;
uf -= redux;
const z: f64 = x - uf;
var r: f64 = exp2ft[i0];
var r: f64 = exp2ft[i_0];
const t: f64 = r * z;
r = r + t * (P1 + z * P2) + t * (z * z) * (P3 + z * P4);
return @floatCast(f32, r * uk);
@ -401,18 +401,18 @@ fn exp2_64(x: f64) f64 {
// reduce x
var uf = x + redux;
// NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here
var i0 = @truncate(u32, @bitCast(u64, uf));
i0 += tblsiz / 2;
var i_0 = @truncate(u32, @bitCast(u64, uf));
i_0 += tblsiz / 2;
const k: u32 = i0 / tblsiz * tblsiz;
const k: u32 = i_0 / tblsiz * tblsiz;
const ik = @bitCast(i32, k / tblsiz);
i0 %= tblsiz;
i_0 %= tblsiz;
uf -= redux;
// r = exp2(y) = exp2t[i0] * p(z - eps[i])
// r = exp2(y) = exp2t[i_0] * p(z - eps[i])
var z = x - uf;
const t = exp2dt[2 * i0];
z -= exp2dt[2 * i0 + 1];
const t = exp2dt[2 * i_0];
z -= exp2dt[2 * i_0 + 1];
const r = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5))));
return math.scalbn(r, ik);

View File

@ -354,7 +354,7 @@ test "math.rotl" {
pub fn Log2Int(comptime T: type) type {
// comptime ceil log2
comptime var count: usize = 0;
comptime var count = 0;
comptime var s = T.bit_count - 1;
inline while (s != 0) : (s >>= 1) {
count += 1;

View File

@ -25,7 +25,6 @@ pub fn sleep(seconds: usize, nanoseconds: usize) void {
}
}
const u63 = @IntType(false, 63);
pub fn posixSleep(seconds: u63, nanoseconds: u63) void {
var req = posix.timespec{
.tv_sec = seconds,

View File

@ -58,11 +58,6 @@ test "floating point primitive bit counts" {
assert(f64.bit_count == 64);
}
const u1 = @IntType(false, 1);
const u63 = @IntType(false, 63);
const i1 = @IntType(true, 1);
const i63 = @IntType(true, 63);
test "@minValue and @maxValue" {
assert(@maxValue(u1) == 1);
assert(@maxValue(u8) == 255);

View File

@ -240,7 +240,6 @@ fn getC(data: *const BitField1) u2 {
return data.c;
}
const u24 = @IntType(false, 24);
const Foo24Bits = packed struct {
field: u24,
};

View File

@ -1,6 +1,15 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"optional pointer to void in extern struct",
\\comptime {
\\ _ = @IntType(false, @maxValue(u32) + 1);
\\}
,
".tmp_source.zig:2:40: error: integer value 4294967296 cannot be implicitly casted to type 'u32'",
);
cases.add(
"optional pointer to void in extern struct",
\\const Foo = extern struct {