macho: share traditional codepaths with stage2+llvm backend

Jakub Konka 2022-05-07 01:44:26 +02:00
parent a2b8a9756f
commit a2dbe6589e
2 changed files with 13 additions and 10 deletions
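The change makes the MachO linker take the traditional (Zld) codepaths not only under stage1 but also when the stage2 compiler is driven by the LLVM backend; only the self-hosted backends keep the incremental path. The standalone Zig sketch below restates that gate with simplified stand-in types; Options, usesTraditionalCodepath, and needsPrealloc are illustrative names, not from the source, and only the boolean expressions mirror the diff.

    const std = @import("std");

    // Simplified stand-in for the build_options / link.Options values consulted
    // in MachO.zig; the field names here are illustrative only.
    const Options = struct {
        have_llvm: bool,
        use_llvm: bool,
        is_stage1: bool,
        use_stage1: bool,
        cache_mode_whole: bool,
    };

    // Mirrors the gate added throughout the diff: traditional (Zld-style)
    // linking is used for stage1 or for stage2 with the LLVM backend.
    fn usesTraditionalCodepath(opts: Options) bool {
        const use_llvm = opts.have_llvm and opts.use_llvm;
        const use_stage1 = opts.is_stage1 and opts.use_stage1;
        return use_llvm or use_stage1;
    }

    // Mirrors the updated needs_prealloc expression in createEmpty: sections
    // are preallocated only on the incremental (self-hosted backend) path.
    fn needsPrealloc(opts: Options) bool {
        return !(usesTraditionalCodepath(opts) or opts.cache_mode_whole);
    }

    pub fn main() void {
        const stage2_llvm = Options{
            .have_llvm = true,
            .use_llvm = true,
            .is_stage1 = false,
            .use_stage1 = false,
            .cache_mode_whole = false,
        };
        std.debug.print("traditional={} prealloc={}\n", .{
            usesTraditionalCodepath(stage2_llvm),
            needsPrealloc(stage2_llvm),
        });
    }

Each hunk in MachO.zig below either introduces the use_llvm constant locally or widens an existing use_stage1 check to use_llvm or use_stage1.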

View File

@@ -58,8 +58,7 @@ make $JOBS install
 # Build stage2 standalone so that we can test stage2 against stage2 compiler-rt.
 release/bin/zig build -p stage2 -Denable-llvm
-# TODO: enable this
-#stage2/bin/zig build test-behavior
+stage2/bin/zig build test-behavior
-# TODO: upgrade these to test stage2 instead of stage1
+# TODO: upgrade these to test stage3 instead of stage2

View File

@@ -392,8 +392,9 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*MachO {
 // Adhoc code signature is required when targeting aarch64-macos either directly or indirectly via the simulator
 // ABI such as aarch64-ios-simulator, etc.
 const requires_adhoc_codesig = cpu_arch == .aarch64 and (os_tag == .macos or abi == .simulator);
+const use_llvm = build_options.have_llvm and options.use_llvm;
 const use_stage1 = build_options.is_stage1 and options.use_stage1;
-const needs_prealloc = !(use_stage1 or options.cache_mode == .whole);
+const needs_prealloc = !(use_stage1 or use_llvm or options.cache_mode == .whole);
 const self = try gpa.create(MachO);
 errdefer gpa.destroy(self);
@@ -410,7 +411,6 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*MachO {
 .needs_prealloc = needs_prealloc,
 };
-const use_llvm = build_options.have_llvm and options.use_llvm;
 if (use_llvm and !use_stage1) {
 self.llvm_object = try LlvmObject.create(gpa, options);
 }
@@ -571,7 +571,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
 if (mem.eql(u8, prev_digest, &digest)) {
 // Hot diggity dog! The output binary is already there.
-if (use_stage1) {
+const use_llvm = build_options.have_llvm and self.base.options.use_llvm;
+if (use_llvm or use_stage1) {
 log.debug("MachO Zld digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
 self.base.lock = man.toOwnedLock();
 return;
@@ -1025,7 +1026,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
 try self.createTentativeDefAtoms();
 try self.parseObjectsIntoAtoms();
-if (use_stage1) {
+const use_llvm = build_options.have_llvm and self.base.options.use_llvm;
+if (use_llvm or use_stage1) {
 try self.sortSections();
 try self.allocateTextSegment();
 try self.allocateDataConstSegment();
@@ -1041,7 +1043,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
 self.logSectionOrdinals();
 }
-if (use_stage1) {
+if (use_llvm or use_stage1) {
 try self.writeAllAtoms();
 } else {
 try self.writeAtoms();
@@ -4930,12 +4932,13 @@ fn allocateSegment(self: *MachO, index: u16, offset: u64) !void {
 var start: u64 = offset;
 for (seg.sections.items) |*sect, sect_id| {
 const is_zerofill = sect.flags == macho.S_ZEROFILL or sect.flags == macho.S_THREAD_LOCAL_ZEROFILL;
+const use_llvm = build_options.have_llvm and self.base.options.use_llvm;
 const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
 const alignment = try math.powi(u32, 2, sect.@"align");
 const start_aligned = mem.alignForwardGeneric(u64, start, alignment);
 // TODO handle zerofill sections in stage2
-sect.offset = if (is_zerofill and use_stage1) 0 else @intCast(u32, seg.inner.fileoff + start_aligned);
+sect.offset = if (is_zerofill and (use_stage1 or use_llvm)) 0 else @intCast(u32, seg.inner.fileoff + start_aligned);
 sect.addr = seg.inner.vmaddr + start_aligned;
 // Recalculate section size given the allocated start address
@@ -4963,7 +4966,7 @@ fn allocateSegment(self: *MachO, index: u16, offset: u64) !void {
 start = start_aligned + sect.size;
-if (!(is_zerofill and use_stage1)) {
+if (!(is_zerofill and (use_stage1 or use_llvm))) {
 seg.inner.filesize = start;
 }
 seg.inner.vmsize = start;
@@ -5012,10 +5015,11 @@ fn initSection(
 sect.addr = seg.inner.vmaddr + off - seg.inner.fileoff;
 const is_zerofill = opts.flags == macho.S_ZEROFILL or opts.flags == macho.S_THREAD_LOCAL_ZEROFILL;
+const use_llvm = build_options.have_llvm and self.base.options.use_llvm;
 const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
 // TODO handle zerofill in stage2
-if (!(is_zerofill and use_stage1)) {
+if (!(is_zerofill and (use_stage1 or use_llvm))) {
 sect.offset = @intCast(u32, off);
 }
 }