From d3ca10d5d8bc92280a14f9e40dc41d6accc1b4c2 Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 3 Feb 2025 20:10:44 +0000 Subject: [PATCH 1/9] Zcu: remove `*_loaded` fields on `File` Instead, `source`, `tree`, and `zir` should all be optional. This is precisely what we're actually trying to model here; and `File` isn't optimized for memory consumption or serializability anyway, so it's fine to use a couple of extra bytes on actual optionals here. --- src/Builtin.zig | 22 ++++---- src/Compilation.zig | 20 +++---- src/Package/Module.zig | 10 ++-- src/Sema.zig | 13 +++-- src/Type.zig | 7 ++- src/Zcu.zig | 85 ++++++++++++++--------------- src/Zcu/PerThread.zig | 119 +++++++++++++++++++---------------------- src/link.zig | 3 +- src/link/Dwarf.zig | 17 +++--- src/main.zig | 112 +++++++++++++++++--------------------- src/print_zir.zig | 23 ++++---- 11 files changed, 191 insertions(+), 240 deletions(-) diff --git a/src/Builtin.zig b/src/Builtin.zig index 1782527f699b..95704bbe2faf 100644 --- a/src/Builtin.zig +++ b/src/Builtin.zig @@ -264,14 +264,12 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void { } pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void { - assert(file.source_loaded == true); - if (mod.root.statFile(mod.root_src_path)) |stat| { - if (stat.size != file.source.len) { + if (stat.size != file.source.?.len) { std.log.warn( "the cached file '{}{s}' had the wrong size. Expected {d}, found {d}. 
" ++ "Overwriting with correct file contents now", - .{ mod.root, mod.root_src_path, file.source.len, stat.size }, + .{ mod.root, mod.root_src_path, file.source.?.len, stat.size }, ); try writeFile(file, mod); @@ -296,15 +294,13 @@ pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void { log.debug("parsing and generating '{s}'", .{mod.root_src_path}); - file.tree = try std.zig.Ast.parse(comp.gpa, file.source, .zig); - assert(file.tree.errors.len == 0); // builtin.zig must parse - file.tree_loaded = true; + file.tree = try std.zig.Ast.parse(comp.gpa, file.source.?, .zig); + assert(file.tree.?.errors.len == 0); // builtin.zig must parse - file.zir = try AstGen.generate(comp.gpa, file.tree); - assert(!file.zir.hasCompileErrors()); // builtin.zig must not have astgen errors - file.zir_loaded = true; + file.zir = try AstGen.generate(comp.gpa, file.tree.?); + assert(!file.zir.?.hasCompileErrors()); // builtin.zig must not have astgen errors file.status = .success_zir; - // Note that whilst we set `zir_loaded` here, we populated `path_digest` + // Note that whilst we set `zir` here, we populated `path_digest` // all the way back in `Package.Module.create`. 
} @@ -312,7 +308,7 @@ fn writeFile(file: *File, mod: *Module) !void { var buf: [std.fs.max_path_bytes]u8 = undefined; var af = try mod.root.atomicFile(mod.root_src_path, .{ .make_path = true }, &buf); defer af.deinit(); - try af.file.writeAll(file.source); + try af.file.writeAll(file.source.?); af.finish() catch |err| switch (err) { error.AccessDenied => switch (builtin.os.tag) { .windows => { @@ -326,7 +322,7 @@ fn writeFile(file: *File, mod: *Module) !void { }; file.stat = .{ - .size = file.source.len, + .size = file.source.?.len, .inode = 0, // dummy value .mtime = 0, // dummy value }; diff --git a/src/Compilation.zig b/src/Compilation.zig index dc1d7df320f4..400372740ead 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3211,7 +3211,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } else { // Must be ZIR or Zoir errors. Note that this may include AST errors. _ = try file.getTree(gpa); // Tree must be loaded. - if (file.zir_loaded) { + if (file.zir != null) { try addZirErrorMessages(&bundle, file); } else if (file.zoir != null) { try addZoirErrorMessages(&bundle, file); @@ -3623,22 +3623,17 @@ pub fn addModuleErrorMsg( } pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { - assert(file.zir_loaded); - assert(file.tree_loaded); - assert(file.source_loaded); const gpa = eb.gpa; const src_path = try file.fullPath(gpa); defer gpa.free(src_path); - return eb.addZirErrorMessages(file.zir, file.tree, file.source, src_path); + return eb.addZirErrorMessages(file.zir.?, file.tree.?, file.source.?, src_path); } pub fn addZoirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { - assert(file.source_loaded); - assert(file.tree_loaded); const gpa = eb.gpa; const src_path = try file.fullPath(gpa); defer gpa.free(src_path); - return eb.addZoirErrorMessages(file.zoir.?, file.tree, file.source, src_path); + return eb.addZoirErrorMessages(file.zoir.?, file.tree.?, file.source.?, src_path); } pub fn performAllTheWork( @@ 
-4312,18 +4307,17 @@ fn workerAstGenFile( // Pre-emptively look for `@import` paths and queue them up. // If we experience an error preemptively fetching the // file, just ignore it and let it happen again later during Sema. - assert(file.zir_loaded); - const imports_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.imports)]; + const imports_index = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)]; if (imports_index != 0) { - const extra = file.zir.extraData(Zir.Inst.Imports, imports_index); + const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_index); var import_i: u32 = 0; var extra_index = extra.end; while (import_i < extra.data.imports_len) : (import_i += 1) { - const item = file.zir.extraData(Zir.Inst.Imports.Item, extra_index); + const item = file.zir.?.extraData(Zir.Inst.Imports.Item, extra_index); extra_index = item.end; - const import_path = file.zir.nullTerminatedString(item.data.name); + const import_path = file.zir.?.nullTerminatedString(item.data.name); // `@import("builtin")` is handled specially. 
if (mem.eql(u8, import_path, "builtin")) continue; diff --git a/src/Package/Module.zig b/src/Package/Module.zig index 5b3a487a4939..01a97df25873 100644 --- a/src/Package/Module.zig +++ b/src/Package/Module.zig @@ -482,13 +482,11 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module { }; new_file.* = .{ .sub_file_path = "builtin.zig", - .source = generated_builtin_source, - .source_loaded = true, - .tree_loaded = false, - .zir_loaded = false, .stat = undefined, - .tree = undefined, - .zir = undefined, + .source = generated_builtin_source, + .tree = null, + .zir = null, + .zoir = null, .status = .never_loaded, .prev_status = .never_loaded, .mod = new, diff --git a/src/Sema.zig b/src/Sema.zig index 5f4463a9d586..19252600f0ef 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7649,9 +7649,8 @@ fn analyzeCall( const nav = ip.getNav(info.owner_nav); const resolved_func_inst = info.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(resolved_func_inst.file); - assert(file.zir_loaded); - const zir_info = file.zir.getFnInfo(resolved_func_inst.inst); - break :b .{ nav, file.zir, info.zir_body_inst, resolved_func_inst.inst, zir_info }; + const zir_info = file.zir.?.getFnInfo(resolved_func_inst.inst); + break :b .{ nav, file.zir.?, info.zir_body_inst, resolved_func_inst.inst, zir_info }; } else .{ undefined, undefined, undefined, undefined, undefined }; // This is the `inst_map` used when evaluating generic parameters and return types. 
@@ -35328,7 +35327,7 @@ fn backingIntType( break :blk accumulator; }; - const zir = zcu.namespacePtr(struct_type.namespace).fileScope(zcu).zir; + const zir = zcu.namespacePtr(struct_type.namespace).fileScope(zcu).zir.?; const zir_index = struct_type.zir_index.resolve(ip) orelse return error.AnalysisFail; const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .struct_decl); @@ -35948,7 +35947,7 @@ fn structFields( const gpa = zcu.gpa; const ip = &zcu.intern_pool; const namespace_index = struct_type.namespace; - const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; + const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir.?; const zir_index = struct_type.zir_index.resolve(ip) orelse return error.AnalysisFail; const fields_len, _, var extra_index = structZirInfo(zir, zir_index); @@ -36149,7 +36148,7 @@ fn structFieldInits( assert(!struct_type.haveFieldInits(ip)); const namespace_index = struct_type.namespace; - const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; + const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir.?; const zir_index = struct_type.zir_index.resolve(ip) orelse return error.AnalysisFail; const fields_len, _, var extra_index = structZirInfo(zir, zir_index); @@ -36268,7 +36267,7 @@ fn unionFields( const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const zir = zcu.namespacePtr(union_type.namespace).fileScope(zcu).zir; + const zir = zcu.namespacePtr(union_type.namespace).fileScope(zcu).zir.?; const zir_index = union_type.zir_index.resolve(ip) orelse return error.AnalysisFail; const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .union_decl); diff --git a/src/Type.zig b/src/Type.zig index 0fd6e184d85c..690cc3fea036 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3587,8 +3587,7 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 { }; const info = tracked.resolveFull(&zcu.intern_pool) 
orelse return null; const file = zcu.fileByIndex(info.file); - assert(file.zir_loaded); - const zir = file.zir; + const zir = file.zir.?; const inst = zir.instructions.get(@intFromEnum(info.inst)); return switch (inst.tag) { .struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_line, @@ -3905,7 +3904,7 @@ fn resolveStructInner( var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); - const zir = zcu.namespacePtr(struct_obj.namespace).fileScope(zcu).zir; + const zir = zcu.namespacePtr(struct_obj.namespace).fileScope(zcu).zir.?; var sema: Sema = .{ .pt = pt, .gpa = gpa, @@ -3959,7 +3958,7 @@ fn resolveUnionInner( var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); - const zir = zcu.namespacePtr(union_obj.namespace).fileScope(zcu).zir; + const zir = zcu.namespacePtr(union_obj.namespace).fileScope(zcu).zir.?; var sema: Sema = .{ .pt = pt, .gpa = gpa, diff --git a/src/Zcu.zig b/src/Zcu.zig index 507452dda34b..b49c5e83bb0a 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -660,22 +660,17 @@ pub const Namespace = struct { pub const File = struct { status: Status, prev_status: Status, - source_loaded: bool, - tree_loaded: bool, - zir_loaded: bool, /// Relative to the owning package's root source directory. /// Memory is stored in gpa, owned by File. sub_file_path: []const u8, - /// Whether this is populated depends on `source_loaded`. - source: [:0]const u8, /// Whether this is populated depends on `status`. stat: Cache.File.Stat, - /// Whether this is populated or not depends on `tree_loaded`. - tree: Ast, - /// Whether this is populated or not depends on `zir_loaded`. - zir: Zir, - /// Cached Zoir, generated lazily. - zoir: ?Zoir = null, + + source: ?[:0]const u8, + tree: ?Ast, + zir: ?Zir, + zoir: ?Zoir, + /// Module that this file is a part of, managed externally. 
mod: *Package.Module, /// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen. @@ -727,23 +722,23 @@ } pub fn unloadTree(file: *File, gpa: Allocator) void { - if (file.tree_loaded) { - file.tree_loaded = false; - file.tree.deinit(gpa); + if (file.tree) |*tree| { + tree.deinit(gpa); + file.tree = null; } } pub fn unloadSource(file: *File, gpa: Allocator) void { - if (file.source_loaded) { - file.source_loaded = false; - gpa.free(file.source); + if (file.source) |source| { + gpa.free(source); + file.source = null; } } pub fn unloadZir(file: *File, gpa: Allocator) void { - if (file.zir_loaded) { - file.zir_loaded = false; - file.zir.deinit(gpa); + if (file.zir) |*zir| { + zir.deinit(gpa); + file.zir = null; } } @@ -753,8 +748,8 @@ }; pub fn getSource(file: *File, gpa: Allocator) !Source { - if (file.source_loaded) return Source{ - .bytes = file.source, + if (file.source) |source| return .{ + .bytes = source, .stat = file.stat, }; @@ -769,7 +764,8 @@ return error.FileTooBig; const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); - defer if (!file.source_loaded) gpa.free(source); + errdefer gpa.free(source); + const amt = try f.readAll(source); if (amt != stat.size) return error.UnexpectedEndOfFile; @@ -778,9 +774,9 @@ // used for error reporting. We need to keep the stat fields stale so that // astGenFile can know to regenerate ZIR.
+ errdefer comptime unreachable; // don't error after populating `source` file.source = source; - file.source_loaded = true; - return Source{ + return .{ .bytes = source, .stat = .{ .size = stat.size, @@ -791,20 +787,20 @@ pub const File = struct { } pub fn getTree(file: *File, gpa: Allocator) !*const Ast { - if (file.tree_loaded) return &file.tree; + if (file.tree) |*tree| return tree; const source = try file.getSource(gpa); - file.tree = try Ast.parse(gpa, source.bytes, file.getMode()); - file.tree_loaded = true; - return &file.tree; + file.tree = try .parse(gpa, source.bytes, file.getMode()); + return &file.tree.?; } pub fn getZoir(file: *File, zcu: *Zcu) !*const Zoir { if (file.zoir) |*zoir| return zoir; - assert(file.tree_loaded); - assert(file.tree.mode == .zon); - file.zoir = try ZonGen.generate(zcu.gpa, file.tree, .{}); + const tree = file.tree.?; + assert(tree.mode == .zon); + + file.zoir = try ZonGen.generate(zcu.gpa, tree, .{}); if (file.zoir.?.hasCompileErrors()) { try zcu.failed_files.putNoClobber(zcu.gpa, file, null); return error.AnalysisFail; @@ -900,18 +896,18 @@ pub const File = struct { // We can only mark children as failed if the ZIR is loaded, which may not // be the case if there were other astgen failures in this file - if (!file.zir_loaded) return; + if (file.zir == null) return; - const imports_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.imports)]; + const imports_index = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)]; if (imports_index == 0) return; - const extra = file.zir.extraData(Zir.Inst.Imports, imports_index); + const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_index); var extra_index = extra.end; for (0..extra.data.imports_len) |_| { - const item = file.zir.extraData(Zir.Inst.Imports.Item, extra_index); + const item = file.zir.?.extraData(Zir.Inst.Imports.Item, extra_index); extra_index = item.end; - const import_path = file.zir.nullTerminatedString(item.data.name); + const import_path = 
file.zir.?.nullTerminatedString(item.data.name); if (mem.eql(u8, import_path, "builtin")) continue; const res = pt.importFile(file, import_path) catch continue; @@ -1012,7 +1008,7 @@ pub const SrcLoc = struct { lazy: LazySrcLoc.Offset, pub fn baseSrcToken(src_loc: SrcLoc) Ast.TokenIndex { - const tree = src_loc.file_scope.tree; + const tree = src_loc.file_scope.tree.?; return tree.firstToken(src_loc.base_node); } @@ -1057,7 +1053,6 @@ pub const SrcLoc = struct { const node_off = traced_off.x; const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.relativeToNodeIndex(node_off); - assert(src_loc.file_scope.tree_loaded); return tree.nodeToSpan(node); }, .node_offset_main_token => |node_off| { @@ -1069,7 +1064,6 @@ pub const SrcLoc = struct { .node_offset_bin_op => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.relativeToNodeIndex(node_off); - assert(src_loc.file_scope.tree_loaded); return tree.nodeToSpan(node); }, .node_offset_initializer => |node_off| { @@ -2408,9 +2402,8 @@ pub const LazySrcLoc = struct { if (zir_inst == .main_struct_inst) return .{ file, 0 }; // Otherwise, make sure ZIR is loaded. - assert(file.zir_loaded); + const zir = file.zir.?; - const zir = file.zir; const inst = zir.instructions.get(@intFromEnum(zir_inst)); const base_node: Ast.Node.Index = switch (inst.tag) { .declaration => inst.data.declaration.src_node, @@ -3671,7 +3664,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv const inst_info = nav.analysis.?.zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); // If the file failed AstGen, the TrackedInst refers to the old ZIR. - const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; + const zir = if (file.status == .success_zir) file.zir.? 
else file.prev_zir.?.*; const decl = zir.getDeclaration(inst_info.inst); if (!comp.config.is_test or file.mod != zcu.main_mod) continue; @@ -3703,7 +3696,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); // If the file failed AstGen, the TrackedInst refers to the old ZIR. - const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; + const zir = if (file.status == .success_zir) file.zir.? else file.prev_zir.?.*; const decl = zir.getDeclaration(inst_info.inst); if (decl.linkage == .@"export") { const unit: AnalUnit = .wrap(.{ .nav_val = nav }); @@ -3721,7 +3714,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); // If the file failed AstGen, the TrackedInst refers to the old ZIR. - const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; + const zir = if (file.status == .success_zir) file.zir.? 
else file.prev_zir.?.*; const decl = zir.getDeclaration(inst_info.inst); if (decl.linkage == .@"export") { const unit: AnalUnit = .wrap(.{ .nav_val = nav }); @@ -3858,7 +3851,7 @@ pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 { const ip = &zcu.intern_pool; const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?; const zir = zcu.fileByIndex(inst_info.file).zir; - return zir.getDeclaration(inst_info.inst).src_line; + return zir.?.getDeclaration(inst_info.inst).src_line; } pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index d564ef8da5d6..a149eee12527 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -209,7 +209,6 @@ pub fn astGenFile( }, else => |e| return e, }; - file.zir_loaded = true; file.stat = .{ .size = header.stat_size, .inode = header.stat_inode, @@ -219,12 +218,12 @@ pub fn astGenFile( file.status = .success_zir; log.debug("AstGen cached success: {s}", .{file.sub_file_path}); - if (file.zir.hasCompileErrors()) { + if (file.zir.?.hasCompileErrors()) { comp.mutex.lock(); defer comp.mutex.unlock(); try zcu.failed_files.putNoClobber(gpa, file, null); } - if (file.zir.loweringFailed()) { + if (file.zir.?.loweringFailed()) { file.status = .astgen_failure; return error.AnalysisFail; } @@ -261,13 +260,12 @@ pub fn astGenFile( // single-threaded context, so we need to keep both versions around // until that point in the pipeline. Previous ZIR data is freed after // that. 
- if (file.zir_loaded and !file.zir.loweringFailed()) { + if (file.zir != null and !file.zir.?.loweringFailed()) { assert(file.prev_zir == null); const prev_zir_ptr = try gpa.create(Zir); file.prev_zir = prev_zir_ptr; - prev_zir_ptr.* = file.zir; - file.zir = undefined; - file.zir_loaded = false; + prev_zir_ptr.* = file.zir.?; + file.zir = null; } file.unload(gpa); @@ -275,7 +273,7 @@ pub fn astGenFile( return error.FileTooBig; const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); - defer if (!file.source_loaded) gpa.free(source); + defer if (file.source == null) gpa.free(source); const amt = try source_file.readAll(source); if (amt != stat.size) return error.UnexpectedEndOfFile; @@ -286,42 +284,39 @@ pub fn astGenFile( .mtime = stat.mtime, }; file.source = source; - file.source_loaded = true; file.tree = try Ast.parse(gpa, source, .zig); - file.tree_loaded = true; // Any potential AST errors are converted to ZIR errors here. - file.zir = try AstGen.generate(gpa, file.tree); - file.zir_loaded = true; + file.zir = try AstGen.generate(gpa, file.tree.?); file.prev_status = file.status; file.status = .success_zir; log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); const safety_buffer = if (Zcu.data_has_safety_tag) - try gpa.alloc([8]u8, file.zir.instructions.len) + try gpa.alloc([8]u8, file.zir.?.instructions.len) else undefined; defer if (Zcu.data_has_safety_tag) gpa.free(safety_buffer); const data_ptr = if (Zcu.data_has_safety_tag) - if (file.zir.instructions.len == 0) + if (file.zir.?.instructions.len == 0) @as([*]const u8, undefined) else @as([*]const u8, @ptrCast(safety_buffer.ptr)) else - @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); + @as([*]const u8, @ptrCast(file.zir.?.instructions.items(.data).ptr)); if (Zcu.data_has_safety_tag) { // The `Data` union has a safety tag but in the file format we store it without. - for (file.zir.instructions.items(.data), 0..) 
|*data, i| { + for (file.zir.?.instructions.items(.data), 0..) |*data, i| { const as_struct: *const Zcu.HackDataLayout = @ptrCast(data); safety_buffer[i] = as_struct.data; } } const header: Zir.Header = .{ - .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), - .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), - .extra_len = @as(u32, @intCast(file.zir.extra.len)), + .instructions_len = @as(u32, @intCast(file.zir.?.instructions.len)), + .string_bytes_len = @as(u32, @intCast(file.zir.?.string_bytes.len)), + .extra_len = @as(u32, @intCast(file.zir.?.extra.len)), .stat_size = stat.size, .stat_inode = stat.inode, @@ -333,20 +328,20 @@ pub fn astGenFile( .len = @sizeOf(Zir.Header), }, .{ - .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), - .len = file.zir.instructions.len, + .base = @as([*]const u8, @ptrCast(file.zir.?.instructions.items(.tag).ptr)), + .len = file.zir.?.instructions.len, }, .{ .base = data_ptr, - .len = file.zir.instructions.len * 8, + .len = file.zir.?.instructions.len * 8, }, .{ - .base = file.zir.string_bytes.ptr, - .len = file.zir.string_bytes.len, + .base = file.zir.?.string_bytes.ptr, + .len = file.zir.?.string_bytes.len, }, .{ - .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), - .len = file.zir.extra.len * 4, + .base = @as([*]const u8, @ptrCast(file.zir.?.extra.ptr)), + .len = file.zir.?.extra.len * 4, }, }; cache_file.writevAll(&iovecs) catch |err| { @@ -355,12 +350,12 @@ pub fn astGenFile( }); }; - if (file.zir.hasCompileErrors()) { + if (file.zir.?.hasCompileErrors()) { comp.mutex.lock(); defer comp.mutex.unlock(); try zcu.failed_files.putNoClobber(gpa, file, null); } - if (file.zir.loweringFailed()) { + if (file.zir.?.loweringFailed()) { file.status = .astgen_failure; return error.AnalysisFail; } @@ -392,7 +387,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { try zcu.markDependeeOutdated(.not_marked_po, .{ .file = file_index }); } const old_zir = file.prev_zir 
orelse continue; - const new_zir = file.zir; + const new_zir = file.zir.?; const gop = try updated_files.getOrPut(gpa, file_index); assert(!gop.found_existing); gop.value_ptr.* = .{ @@ -400,7 +395,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { .inst_map = .{}, }; if (!new_zir.loweringFailed()) { - try Zcu.mapOldZirToNew(gpa, old_zir.*, file.zir, &gop.value_ptr.inst_map); + try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, &gop.value_ptr.inst_map); } } @@ -426,7 +421,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { // Either way, invalidate associated `src_hash` deps. log.debug("tracking failed for %{d}{s}", .{ old_inst, - if (file.zir.loweringFailed()) " due to AstGen failure" else "", + if (file.zir.?.loweringFailed()) " due to AstGen failure" else "", }); tracked_inst.inst = .lost; try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index }); @@ -435,7 +430,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst); const old_zir = file.prev_zir.?.*; - const new_zir = file.zir; + const new_zir = file.zir.?; const old_tag = old_zir.instructions.items(.tag)[@intFromEnum(old_inst)]; const old_data = old_zir.instructions.items(.data)[@intFromEnum(old_inst)]; @@ -532,7 +527,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { for (updated_files.keys(), updated_files.values()) |file_index, updated_file| { const file = updated_file.file; - if (file.zir.loweringFailed()) { + if (file.zir.?.loweringFailed()) { // Keep `prev_zir` around: it's the last usable ZIR. // Don't update the namespace, as we have no new data to update *to*. } else { @@ -805,7 +800,7 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends // in `ensureComptimeUnitUpToDate`. 
if (file.status != .success_zir) return error.AnalysisFail; - const zir = file.zir; + const zir = file.zir.?; // We are about to re-analyze this unit; drop its depenndencies. zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); @@ -1002,7 +997,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends // in `ensureComptimeUnitUpToDate`. if (file.status != .success_zir) return error.AnalysisFail; - const zir = file.zir; + const zir = file.zir.?; // We are about to re-analyze this unit; drop its depenndencies. zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); @@ -1380,7 +1375,7 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends // in `ensureComptimeUnitUpToDate`. if (file.status != .success_zir) return error.AnalysisFail; - const zir = file.zir; + const zir = file.zir.?; // We are about to re-analyze this unit; drop its depenndencies. 
zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); @@ -1758,7 +1753,7 @@ fn createFileRootStruct( const gpa = zcu.gpa; const ip = &zcu.intern_pool; const file = zcu.fileByIndex(file_index); - const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; + const extended = file.zir.?.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); assert(!small.has_captures_len); @@ -1766,16 +1761,16 @@ fn createFileRootStruct( assert(small.layout == .auto); var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len; const fields_len = if (small.has_fields_len) blk: { - const fields_len = file.zir.extra[extra_index]; + const fields_len = file.zir.?.extra[extra_index]; extra_index += 1; break :blk fields_len; } else 0; const decls_len = if (small.has_decls_len) blk: { - const decls_len = file.zir.extra[extra_index]; + const decls_len = file.zir.?.extra[extra_index]; extra_index += 1; break :blk decls_len; } else 0; - const decls = file.zir.bodySlice(extra_index, decls_len); + const decls = file.zir.?.bodySlice(extra_index, decls_len); extra_index += decls_len; const tracked_inst = try ip.trackZir(gpa, pt.tid, .{ @@ -1844,17 +1839,17 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator. 
const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu); const decls = decls: { - const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; + const extended = file.zir.?.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len; extra_index += @intFromBool(small.has_fields_len); const decls_len = if (small.has_decls_len) blk: { - const decls_len = file.zir.extra[extra_index]; + const decls_len = file.zir.?.extra[extra_index]; extra_index += 1; break :blk decls_len; } else 0; - break :decls file.zir.bodySlice(extra_index, decls_len); + break :decls file.zir.?.bodySlice(extra_index, decls_len); }; try pt.scanNamespace(namespace_index, decls); zcu.namespacePtr(namespace_index).generation = zcu.generation; @@ -1873,7 +1868,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { if (file.status != .success_zir) { return error.AnalysisFail; } - assert(file.zir_loaded); + assert(file.zir != null); const new_namespace_index = try pt.createNamespace(.{ .parent = .none, @@ -1983,13 +1978,11 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { gop.value_ptr.* = new_file_index; new_file.* = .{ .sub_file_path = sub_file_path, - .source = undefined, - .source_loaded = false, - .tree_loaded = false, - .zir_loaded = false, .stat = undefined, - .tree = undefined, - .zir = undefined, + .source = null, + .tree = null, + .zir = null, + .zoir = null, .status = .never_loaded, .prev_status = .never_loaded, .mod = mod, @@ -2096,13 +2089,11 @@ pub fn importFile( gop.value_ptr.* = new_file_index; new_file.* = .{ .sub_file_path = sub_file_path, - .source = undefined, - .source_loaded = false, - .tree_loaded = false, - .zir_loaded = false, .stat = undefined, - .tree = undefined, - 
.zir = undefined, + .source = null, + .tree = null, + .zir = null, + .zoir = null, .status = .never_loaded, .prev_status = .never_loaded, .mod = mod, @@ -2441,7 +2432,7 @@ const ScanDeclIter = struct { const namespace = zcu.namespacePtr(namespace_index); const gpa = zcu.gpa; const file = namespace.fileScope(zcu); - const zir = file.zir; + const zir = file.zir.?; const ip = &zcu.intern_pool; const decl = zir.getDeclaration(decl_inst); @@ -2591,7 +2582,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE const func = zcu.funcInfo(func_index); const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); - const zir = file.zir; + const zir = file.zir.?; try zcu.analysis_in_progress.put(gpa, anal_unit, {}); errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit); @@ -2843,7 +2834,9 @@ pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Err /// Removes any entry from `Zcu.failed_files` associated with `file`. Acquires `Compilation.mutex` as needed. /// `file.zir` must be unchanged from the last update, as it is used to determine if there is such an entry. 
fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void { - if (!file.zir_loaded or !file.zir.hasCompileErrors()) return; + const zir = file.zir orelse return; + if (zir.hasCompileErrors()) return; + pt.zcu.comp.mutex.lock(); defer pt.zcu.comp.mutex.unlock(); if (pt.zcu.failed_files.fetchSwapRemove(file)) |kv| { @@ -3779,7 +3772,7 @@ fn recreateStructType( const inst_info = key.zir_index.resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); assert(file.status == .success_zir); // otherwise inst tracking failed - const zir = file.zir; + const zir = file.zir.?; assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; @@ -3852,7 +3845,7 @@ fn recreateUnionType( const inst_info = key.zir_index.resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); assert(file.status == .success_zir); // otherwise inst tracking failed - const zir = file.zir; + const zir = file.zir.?; assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; @@ -3939,7 +3932,7 @@ fn recreateEnumType( const inst_info = key.zir_index.resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); assert(file.status == .success_zir); // otherwise inst tracking failed - const zir = file.zir; + const zir = file.zir.?; assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; @@ -4083,7 +4076,7 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); if (file.status != .success_zir) return error.AnalysisFail; - const zir = file.zir; + const zir = file.zir.?; 
assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; diff --git a/src/link.zig b/src/link.zig index e6ee788095f7..d2e841ffb602 100644 --- a/src/link.zig +++ b/src/link.zig @@ -750,8 +750,7 @@ pub const File = struct { { const ti = ti_id.resolveFull(&pt.zcu.intern_pool).?; const file = pt.zcu.fileByIndex(ti.file); - assert(file.zir_loaded); - const inst = file.zir.instructions.get(@intFromEnum(ti.inst)); + const inst = file.zir.?.instructions.get(@intFromEnum(ti.inst)); assert(inst.tag == .declaration); } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 392d9dd181bf..6f596b61618a 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2358,8 +2358,7 @@ fn initWipNavInner( const nav = ip.getNav(nav_index); const inst_info = nav.srcInst(ip).resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); - assert(file.zir_loaded); - const decl = file.zir.getDeclaration(inst_info.inst); + const decl = file.zir.?.getDeclaration(inst_info.inst); log.debug("initWipNav({s}:{d}:{d} %{d} = {})", .{ file.sub_file_path, decl.src_line + 1, @@ -2373,7 +2372,7 @@ fn initWipNavInner( switch (nav_key) { // Ignore @extern .@"extern" => |@"extern"| if (decl.linkage != .@"extern" or - !@"extern".name.eqlSlice(file.zir.nullTerminatedString(decl.name), ip)) return null, + !@"extern".name.eqlSlice(file.zir.?.nullTerminatedString(decl.name), ip)) return null, else => {}, } @@ -2696,8 +2695,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo const nav = ip.getNav(nav_index); const inst_info = nav.srcInst(ip).resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); - assert(file.zir_loaded); - const decl = file.zir.getDeclaration(inst_info.inst); + const decl = file.zir.?.getDeclaration(inst_info.inst); log.debug("updateComptimeNav({s}:{d}:{d} %{d} = {})", .{ file.sub_file_path, decl.src_line + 1, @@ -4097,7 +4095,7 @@ 
pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP // if a newly-tracked instruction can be a type's owner `zir_index`. comptime assert(Zir.inst_tracking_version == 0); - const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst)); + const decl_inst = file.zir.?.instructions.get(@intFromEnum(inst_info.inst)); const name_strat: Zir.Inst.NameStrategy = switch (decl_inst.tag) { .struct_init, .struct_init_ref, .struct_init_anon => .anon, .extended => switch (decl_inst.data.extended.opcode) { @@ -4301,14 +4299,13 @@ pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedI const inst_info = zir_index.resolveFull(ip).?; assert(inst_info.inst != .main_struct_inst); const file = zcu.fileByIndex(inst_info.file); - assert(file.zir_loaded); - const decl = file.zir.getDeclaration(inst_info.inst); + const decl = file.zir.?.getDeclaration(inst_info.inst); log.debug("updateLineNumber({s}:{d}:{d} %{d} = {s})", .{ file.sub_file_path, decl.src_line + 1, decl.src_column + 1, @intFromEnum(inst_info.inst), - file.zir.nullTerminatedString(decl.name), + file.zir.?.nullTerminatedString(decl.name), }); var line_buf: [4]u8 = undefined; @@ -4661,7 +4658,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void { .target_unit = StringSection.unit, .target_entry = (try dwarf.debug_line_str.addString( dwarf, - if (file.mod.builtin_file == file) file.source else "", + if (file.mod.builtin_file == file) file.source.? 
else "", )).toOptional(), }); header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes()); diff --git a/src/main.zig b/src/main.zig index ba5ebf8efd09..8d30bef23767 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3636,7 +3636,7 @@ fn buildOutputType( if (show_builtin) { const builtin_mod = comp.root_mod.getBuiltinDependency(); - const source = builtin_mod.builtin_file.?.source; + const source = builtin_mod.builtin_file.?.source.?; return std.io.getStdOut().writeAll(source); } switch (listen) { @@ -6135,14 +6135,12 @@ fn cmdAstCheck( var file: Zcu.File = .{ .status = .never_loaded, .prev_status = .never_loaded, - .source_loaded = false, - .tree_loaded = false, - .zir_loaded = false, .sub_file_path = undefined, - .source = undefined, .stat = undefined, - .tree = undefined, - .zir = undefined, + .source = null, + .tree = null, + .zir = null, + .zoir = null, .mod = undefined, }; if (zig_source_file) |file_name| { @@ -6163,7 +6161,6 @@ fn cmdAstCheck( file.sub_file_path = file_name; file.source = source; - file.source_loaded = true; file.stat = .{ .size = stat.size, .inode = stat.inode, @@ -6176,7 +6173,6 @@ fn cmdAstCheck( }; file.sub_file_path = ""; file.source = source; - file.source_loaded = true; file.stat.size = source.len; } @@ -6196,17 +6192,15 @@ fn cmdAstCheck( .fully_qualified_name = "root", }); - file.tree = try Ast.parse(gpa, file.source, mode); - file.tree_loaded = true; - defer file.tree.deinit(gpa); + file.tree = try Ast.parse(gpa, file.source.?, mode); + defer file.tree.?.deinit(gpa); switch (mode) { .zig => { - file.zir = try AstGen.generate(gpa, file.tree); - file.zir_loaded = true; - defer file.zir.deinit(gpa); + file.zir = try AstGen.generate(gpa, file.tree.?); + defer file.zir.?.deinit(gpa); - if (file.zir.hasCompileErrors()) { + if (file.zir.?.hasCompileErrors()) { var wip_errors: std.zig.ErrorBundle.Wip = undefined; try wip_errors.init(gpa); defer wip_errors.deinit(); @@ -6215,13 +6209,13 @@ fn cmdAstCheck( defer error_bundle.deinit(gpa); 
error_bundle.renderToStdErr(color.renderOptions()); - if (file.zir.loweringFailed()) { + if (file.zir.?.loweringFailed()) { process.exit(1); } } if (!want_output_text) { - if (file.zir.hasCompileErrors()) { + if (file.zir.?.hasCompileErrors()) { process.exit(1); } else { return cleanExit(); @@ -6233,18 +6227,18 @@ fn cmdAstCheck( { const token_bytes = @sizeOf(Ast.TokenList) + - file.tree.tokens.len * (@sizeOf(std.zig.Token.Tag) + @sizeOf(Ast.ByteOffset)); - const tree_bytes = @sizeOf(Ast) + file.tree.nodes.len * + file.tree.?.tokens.len * (@sizeOf(std.zig.Token.Tag) + @sizeOf(Ast.ByteOffset)); + const tree_bytes = @sizeOf(Ast) + file.tree.?.nodes.len * (@sizeOf(Ast.Node.Tag) + @sizeOf(Ast.Node.Data) + @sizeOf(Ast.TokenIndex)); - const instruction_bytes = file.zir.instructions.len * + const instruction_bytes = file.zir.?.instructions.len * // Here we don't use @sizeOf(Zir.Inst.Data) because it would include // the debug safety tag but we want to measure release size. (@sizeOf(Zir.Inst.Tag) + 8); - const extra_bytes = file.zir.extra.len * @sizeOf(u32); + const extra_bytes = file.zir.?.extra.len * @sizeOf(u32); const total_bytes = @sizeOf(Zir) + instruction_bytes + extra_bytes + - file.zir.string_bytes.len * @sizeOf(u8); + file.zir.?.string_bytes.len * @sizeOf(u8); const stdout = io.getStdOut(); const fmtIntSizeBin = std.fmt.fmtIntSizeBin; // zig fmt: off @@ -6258,27 +6252,27 @@ fn cmdAstCheck( \\# Extra Data Items: {d} ({}) \\ , .{ - fmtIntSizeBin(file.source.len), - file.tree.tokens.len, fmtIntSizeBin(token_bytes), - file.tree.nodes.len, fmtIntSizeBin(tree_bytes), + fmtIntSizeBin(file.source.?.len), + file.tree.?.tokens.len, fmtIntSizeBin(token_bytes), + file.tree.?.nodes.len, fmtIntSizeBin(tree_bytes), fmtIntSizeBin(total_bytes), - file.zir.instructions.len, fmtIntSizeBin(instruction_bytes), - fmtIntSizeBin(file.zir.string_bytes.len), - file.zir.extra.len, fmtIntSizeBin(extra_bytes), + file.zir.?.instructions.len, fmtIntSizeBin(instruction_bytes), + 
fmtIntSizeBin(file.zir.?.string_bytes.len), + file.zir.?.extra.len, fmtIntSizeBin(extra_bytes), }); // zig fmt: on } try @import("print_zir.zig").renderAsTextToFile(gpa, &file, io.getStdOut()); - if (file.zir.hasCompileErrors()) { + if (file.zir.?.hasCompileErrors()) { process.exit(1); } else { return cleanExit(); } }, .zon => { - const zoir = try ZonGen.generate(gpa, file.tree, .{}); + const zoir = try ZonGen.generate(gpa, file.tree.?, .{}); defer zoir.deinit(gpa); if (zoir.hasCompileErrors()) { @@ -6289,7 +6283,7 @@ fn cmdAstCheck( { const src_path = try file.fullPath(gpa); defer gpa.free(src_path); - try wip_errors.addZoirErrorMessages(zoir, file.tree, file.source, src_path); + try wip_errors.addZoirErrorMessages(zoir, file.tree.?, file.source.?, src_path); } var error_bundle = try wip_errors.toOwnedBundle(""); @@ -6519,26 +6513,24 @@ fn cmdDumpZir( var file: Zcu.File = .{ .status = .never_loaded, .prev_status = .never_loaded, - .source_loaded = false, - .tree_loaded = false, - .zir_loaded = true, .sub_file_path = undefined, - .source = undefined, .stat = undefined, - .tree = undefined, + .source = null, + .tree = null, .zir = try Zcu.loadZirCache(gpa, f), + .zoir = null, .mod = undefined, }; - defer file.zir.deinit(gpa); + defer file.zir.?.deinit(gpa); { - const instruction_bytes = file.zir.instructions.len * + const instruction_bytes = file.zir.?.instructions.len * // Here we don't use @sizeOf(Zir.Inst.Data) because it would include // the debug safety tag but we want to measure release size. 
(@sizeOf(Zir.Inst.Tag) + 8); - const extra_bytes = file.zir.extra.len * @sizeOf(u32); + const extra_bytes = file.zir.?.extra.len * @sizeOf(u32); const total_bytes = @sizeOf(Zir) + instruction_bytes + extra_bytes + - file.zir.string_bytes.len * @sizeOf(u8); + file.zir.?.string_bytes.len * @sizeOf(u8); const stdout = io.getStdOut(); const fmtIntSizeBin = std.fmt.fmtIntSizeBin; // zig fmt: off @@ -6550,9 +6542,9 @@ fn cmdDumpZir( \\ , .{ fmtIntSizeBin(total_bytes), - file.zir.instructions.len, fmtIntSizeBin(instruction_bytes), - fmtIntSizeBin(file.zir.string_bytes.len), - file.zir.extra.len, fmtIntSizeBin(extra_bytes), + file.zir.?.instructions.len, fmtIntSizeBin(instruction_bytes), + fmtIntSizeBin(file.zir.?.string_bytes.len), + file.zir.?.extra.len, fmtIntSizeBin(extra_bytes), }); // zig fmt: on } @@ -6587,18 +6579,16 @@ fn cmdChangelist( var file: Zcu.File = .{ .status = .never_loaded, .prev_status = .never_loaded, - .source_loaded = false, - .tree_loaded = false, - .zir_loaded = false, .sub_file_path = old_source_file, - .source = undefined, .stat = .{ .size = stat.size, .inode = stat.inode, .mtime = stat.mtime, }, - .tree = undefined, - .zir = undefined, + .source = null, + .tree = null, + .zir = null, + .zoir = null, .mod = undefined, }; @@ -6613,17 +6603,14 @@ fn cmdChangelist( if (amt != stat.size) return error.UnexpectedEndOfFile; file.source = source; - file.source_loaded = true; - file.tree = try Ast.parse(gpa, file.source, .zig); - file.tree_loaded = true; - defer file.tree.deinit(gpa); + file.tree = try Ast.parse(gpa, file.source.?, .zig); + defer file.tree.?.deinit(gpa); - file.zir = try AstGen.generate(gpa, file.tree); - file.zir_loaded = true; - defer file.zir.deinit(gpa); + file.zir = try AstGen.generate(gpa, file.tree.?); + defer file.zir.?.deinit(gpa); - if (file.zir.loweringFailed()) { + if (file.zir.?.loweringFailed()) { var wip_errors: std.zig.ErrorBundle.Wip = undefined; try wip_errors.init(gpa); defer wip_errors.deinit(); @@ -6652,13 +6639,12 
@@ fn cmdChangelist( var new_tree = try Ast.parse(gpa, new_source, .zig); defer new_tree.deinit(gpa); - var old_zir = file.zir; + var old_zir = file.zir.?; defer old_zir.deinit(gpa); - file.zir_loaded = false; + file.zir = null; file.zir = try AstGen.generate(gpa, new_tree); - file.zir_loaded = true; - if (file.zir.loweringFailed()) { + if (file.zir.?.loweringFailed()) { var wip_errors: std.zig.ErrorBundle.Wip = undefined; try wip_errors.init(gpa); defer wip_errors.deinit(); @@ -6672,7 +6658,7 @@ fn cmdChangelist( var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty; defer inst_map.deinit(gpa); - try Zcu.mapOldZirToNew(gpa, old_zir, file.zir, &inst_map); + try Zcu.mapOldZirToNew(gpa, old_zir, file.zir.?, &inst_map); var bw = io.bufferedWriter(io.getStdOut().writer()); const stdout = bw.writer(); diff --git a/src/print_zir.zig b/src/print_zir.zig index 033ea82de9cb..0757ca83de58 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -22,7 +22,7 @@ pub fn renderAsTextToFile( .gpa = gpa, .arena = arena.allocator(), .file = scope_file, - .code = scope_file.zir, + .code = scope_file.zir.?, .indent = 0, .parent_decl_node = 0, .recurse_decls = true, @@ -36,18 +36,18 @@ pub fn renderAsTextToFile( try stream.print("%{d} ", .{@intFromEnum(main_struct_inst)}); try writer.writeInstToStream(stream, main_struct_inst); try stream.writeAll("\n"); - const imports_index = scope_file.zir.extra[@intFromEnum(Zir.ExtraIndex.imports)]; + const imports_index = scope_file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)]; if (imports_index != 0) { try stream.writeAll("Imports:\n"); - const extra = scope_file.zir.extraData(Zir.Inst.Imports, imports_index); + const extra = scope_file.zir.?.extraData(Zir.Inst.Imports, imports_index); var extra_index = extra.end; for (0..extra.data.imports_len) |_| { - const item = scope_file.zir.extraData(Zir.Inst.Imports.Item, extra_index); + const item = scope_file.zir.?.extraData(Zir.Inst.Imports.Item, extra_index); 
extra_index = item.end; - const import_path = scope_file.zir.nullTerminatedString(item.data.name); + const import_path = scope_file.zir.?.nullTerminatedString(item.data.name); try stream.print(" @import(\"{}\") ", .{ std.zig.fmtEscapes(import_path), }); @@ -75,7 +75,7 @@ pub fn renderInstructionContext( .gpa = gpa, .arena = arena.allocator(), .file = scope_file, - .code = scope_file.zir, + .code = scope_file.zir.?, .indent = if (indent < 2) 2 else indent, .parent_decl_node = parent_decl_node, .recurse_decls = false, @@ -107,7 +107,7 @@ pub fn renderSingleInstruction( .gpa = gpa, .arena = arena.allocator(), .file = scope_file, - .code = scope_file.zir, + .code = scope_file.zir.?, .indent = indent, .parent_decl_node = parent_decl_node, .recurse_decls = false, @@ -2759,8 +2759,7 @@ const Writer = struct { } fn writeSrcNode(self: *Writer, stream: anytype, src_node: i32) !void { - if (!self.file.tree_loaded) return; - const tree = self.file.tree; + const tree = self.file.tree orelse return; const abs_node = self.relativeToNodeIndex(src_node); const src_span = tree.nodeToSpan(abs_node); const start = self.line_col_cursor.find(tree.source, src_span.start); @@ -2772,8 +2771,7 @@ const Writer = struct { } fn writeSrcTok(self: *Writer, stream: anytype, src_tok: u32) !void { - if (!self.file.tree_loaded) return; - const tree = self.file.tree; + const tree = self.file.tree orelse return; const abs_tok = tree.firstToken(self.parent_decl_node) + src_tok; const span_start = tree.tokens.items(.start)[abs_tok]; const span_end = span_start + @as(u32, @intCast(tree.tokenSlice(abs_tok).len)); @@ -2786,8 +2784,7 @@ const Writer = struct { } fn writeSrcTokAbs(self: *Writer, stream: anytype, src_tok: u32) !void { - if (!self.file.tree_loaded) return; - const tree = self.file.tree; + const tree = self.file.tree orelse return; const span_start = tree.tokens.items(.start)[src_tok]; const span_end = span_start + @as(u32, @intCast(tree.tokenSlice(src_tok).len)); const start = 
self.line_col_cursor.find(tree.source, span_start); From 5e20a47469f5d6feac7fb0785b2437e417b068ef Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 3 Feb 2025 21:42:50 +0000 Subject: [PATCH 2/9] Zcu: remove unused `parse_failure` field from `File.Status` These are reported as `astgen_failure` instead. --- src/Zcu.zig | 5 ++--- src/Zcu/PerThread.zig | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Zcu.zig b/src/Zcu.zig index b49c5e83bb0a..bb8c64836677 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -687,7 +687,6 @@ pub const File = struct { pub const Status = enum { never_loaded, retryable_failure, - parse_failure, astgen_failure, success_zir, }; @@ -852,7 +851,7 @@ pub const File = struct { pub fn okToReportErrors(file: File) bool { return switch (file.status) { - .parse_failure, .astgen_failure => false, + .astgen_failure => false, else => true, }; } @@ -3299,7 +3298,7 @@ pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode { fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void { switch (file.status) { .success_zir, .retryable_failure => {}, - .never_loaded, .parse_failure, .astgen_failure => { + .never_loaded, .astgen_failure => { zcu.comp.mutex.lock(); defer zcu.comp.mutex.unlock(); if (zcu.failed_files.fetchSwapRemove(file)) |kv| { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index a149eee12527..c18145180d56 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -109,7 +109,7 @@ pub fn astGenFile( break :lock .shared; }, - .parse_failure, .astgen_failure, .success_zir => lock: { + .astgen_failure, .success_zir => lock: { const unchanged_metadata = stat.size == file.stat.size and stat.mtime == file.stat.mtime and From a8e53801d0bfe2132831b8286c4a237788aea8fa Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 4 Feb 2025 11:55:54 +0000 Subject: [PATCH 3/9] compiler: don't perform semantic analysis if there are files without ZIR --- src/Builtin.zig | 2 +- src/Compilation.zig | 56 +++++++++------- 
src/Package/Module.zig | 1 - src/Zcu.zig | 77 ++++++++++------------ src/Zcu/PerThread.zig | 146 ++++++++++++++++------------------------- src/main.zig | 3 - 6 files changed, 122 insertions(+), 163 deletions(-) diff --git a/src/Builtin.zig b/src/Builtin.zig index 95704bbe2faf..ac23cafb3c44 100644 --- a/src/Builtin.zig +++ b/src/Builtin.zig @@ -299,7 +299,7 @@ pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void { file.zir = try AstGen.generate(comp.gpa, file.tree.?); assert(!file.zir.?.hasCompileErrors()); // builtin.zig must not have astgen errors - file.status = .success_zir; + file.status = .success; // Note that whilst we set `zir` here, we populated `path_digest` // all the way back in `Package.Module.create`. } diff --git a/src/Compilation.zig b/src/Compilation.zig index 400372740ead..28017eeed9a4 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3203,8 +3203,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } if (comp.zcu) |zcu| { - const ip = &zcu.intern_pool; - for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |msg| { try addModuleErrorMsg(zcu, &bundle, msg.*); @@ -3277,20 +3275,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (!refs.contains(anal_unit)) continue; } - report_ok: { - const file_index = switch (anal_unit.unwrap()) { - .@"comptime" => |cu| ip.getComptimeUnit(cu).zir_index.resolveFile(ip), - .nav_val, .nav_ty => |nav| ip.getNav(nav).analysis.?.zir_index.resolveFile(ip), - .type => |ty| Type.fromInterned(ty).typeDeclInst(zcu).?.resolveFile(ip), - .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFile(ip), - .memoized_state => break :report_ok, // always report std.builtin errors - }; - - // Skip errors for AnalUnits within files that had a parse failure. - // We'll try again once parsing succeeds. 
- if (!zcu.fileByIndex(file_index).okToReportErrors()) continue; - } - std.log.scoped(.zcu).debug("analysis error '{s}' reported from unit '{}'", .{ error_msg.msg, zcu.fmtAnalUnit(anal_unit), @@ -3318,12 +3302,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } } - for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| { - if (!zcu.navFileScope(nav).okToReportErrors()) continue; + for (zcu.failed_codegen.values()) |error_msg| { try addModuleErrorMsg(zcu, &bundle, error_msg.*); } - for (zcu.failed_types.keys(), zcu.failed_types.values()) |ty_index, error_msg| { - if (!zcu.typeFileScope(ty_index).okToReportErrors()) continue; + for (zcu.failed_types.values()) |error_msg| { try addModuleErrorMsg(zcu, &bundle, error_msg.*); } for (zcu.failed_exports.values()) |value| { @@ -3827,12 +3809,35 @@ fn performAllTheWorkInner( if (comp.zcu) |zcu| { const pt: Zcu.PerThread = .activate(zcu, .main); defer pt.deactivate(); + + try reportMultiModuleErrors(pt); + + const any_fatal_files = for (zcu.import_table.values()) |file_index| { + const file = zcu.fileByIndex(file_index); + if (file.getMode() == .zon) continue; + switch (file.status) { + .never_loaded => unreachable, // everything is loaded by the workers + .retryable_failure, .astgen_failure => break true, + .success => {}, + } + } else false; + + if (any_fatal_files) { + // We give up right now! No updating of ZIR refs, no nothing. The idea is that this prevents + // us from invalidating lots of incremental dependencies due to files with e.g. parse errors. + // However, this means our analysis data is invalid, so we want to omit all analysis errors. + // To do that, let's just clear the analysis roots! 
+ + assert(zcu.failed_files.count() > 0); // we will get an error + zcu.analysis_roots.clear(); // no analysis happened + return; + } + if (comp.incremental) { const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0); defer update_zir_refs_node.end(); try pt.updateZirRefs(); } - try reportMultiModuleErrors(pt); try zcu.flushRetryableFailures(); zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); @@ -4294,11 +4299,12 @@ fn workerAstGenFile( pt.astGenFile(file, path_digest) catch |err| switch (err) { error.AnalysisFail => return, else => { - file.status = .retryable_failure; pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) { - // Swallowing this error is OK because it's implied to be OOM when - // there is a missing `failed_files` error message. - error.OutOfMemory => {}, + error.OutOfMemory => { + comp.mutex.lock(); + defer comp.mutex.unlock(); + comp.setAllocFailure(); + }, }; return; }, diff --git a/src/Package/Module.zig b/src/Package/Module.zig index 01a97df25873..0dec7bde76e5 100644 --- a/src/Package/Module.zig +++ b/src/Package/Module.zig @@ -488,7 +488,6 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module { .zir = null, .zoir = null, .status = .never_loaded, - .prev_status = .never_loaded, .mod = new, }; break :b new; diff --git a/src/Zcu.zig b/src/Zcu.zig index bb8c64836677..0ce43ce78d87 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -658,11 +658,27 @@ pub const Namespace = struct { }; pub const File = struct { - status: Status, - prev_status: Status, /// Relative to the owning package's root source directory. /// Memory is stored in gpa, owned by File. sub_file_path: []const u8, + + status: enum { + /// We have not yet attempted to load this file. + /// `stat` is not populated and may be `undefined`. + never_loaded, + /// A filesystem access failed. It should be retried on the next update. + /// There is a `failed_files` entry containing a non-`null` message. 
+ /// `stat` is not populated and may be `undefined`. + retryable_failure, + /// Parsing/AstGen/ZonGen of this file has failed. + /// There is an error in `zir` or `zoir`. + /// There is a `failed_files` entry (with a `null` message). + /// `stat` is populated. + astgen_failure, + /// Parsing and AstGen/ZonGen of this file has succeeded. + /// `stat` is populated. + success, + }, /// Whether this is populated depends on `status`. stat: Cache.File.Stat, @@ -678,19 +694,17 @@ pub const File = struct { /// List of references to this file, used for multi-package errors. references: std.ArrayListUnmanaged(File.Reference) = .empty, - /// The most recent successful ZIR for this file, with no errors. - /// This is only populated when a previously successful ZIR - /// newly introduces compile errors during an update. When ZIR is - /// successful, this field is unloaded. + /// The ZIR for this file from the last update with no file failures. As such, this ZIR is never + /// failed (although it may have compile errors). + /// + /// Because updates with file failures do not perform ZIR mapping or semantic analysis, we keep + /// this around so we have the "old" ZIR to map when an update is ready to do so. Once such an + /// update occurs, this field is unloaded, since it is no longer necessary. + /// + /// In other words, if `TrackedInst`s are tied to ZIR other than what's in the `zir` field, this + /// field is populated with that old ZIR. prev_zir: ?*Zir = null, - pub const Status = enum { - never_loaded, - retryable_failure, - astgen_failure, - success_zir, - }; - /// A single reference to a file. pub const Reference = union(enum) { /// The file is imported directly (i.e. not as a package) with @import. 
@@ -763,7 +777,7 @@ pub const File = struct { return error.FileTooBig; const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); - defer gpa.free(source); + errdefer gpa.free(source); const amt = try f.readAll(source); if (amt != stat.size) @@ -773,8 +787,9 @@ pub const File = struct { // used for error reporting. We need to keep the stat fields stale so that // astGenFile can know to regenerate ZIR. - errdefer comptime unreachable; // don't error after populating `source` file.source = source; + errdefer comptime unreachable; // don't error after populating `source` + return .{ .bytes = source, .stat = .{ @@ -849,13 +864,6 @@ pub const File = struct { std.debug.print("{s}:{d}:{d}\n", .{ file.sub_file_path, loc.line + 1, loc.column + 1 }); } - pub fn okToReportErrors(file: File) bool { - return switch (file.status) { - .astgen_failure => false, - else => true, - }; - } - /// Add a reference to this file during AstGen. pub fn addReference(file: *File, zcu: *Zcu, ref: File.Reference) !void { // Don't add the same module root twice. Note that since we always add module roots at the @@ -3295,19 +3303,6 @@ pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode { return zcu.root_mod.optimize_mode; } -fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void { - switch (file.status) { - .success_zir, .retryable_failure => {}, - .never_loaded, .astgen_failure => { - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - if (zcu.failed_files.fetchSwapRemove(file)) |kv| { - if (kv.value) |msg| msg.destroy(zcu.gpa); // Delete previous error message. - } - }, - } -} - pub fn handleUpdateExports( zcu: *Zcu, export_indices: []const Export.Index, @@ -3662,9 +3657,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv // `test` declarations are analyzed depending on the test filter. 
const inst_info = nav.analysis.?.zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); - // If the file failed AstGen, the TrackedInst refers to the old ZIR. - const zir = if (file.status == .success_zir) file.zir.? else file.prev_zir.?.*; - const decl = zir.getDeclaration(inst_info.inst); + const decl = file.zir.?.getDeclaration(inst_info.inst); if (!comp.config.is_test or file.mod != zcu.main_mod) continue; @@ -3694,9 +3687,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv // These are named declarations. They are analyzed only if marked `export`. const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); - // If the file failed AstGen, the TrackedInst refers to the old ZIR. - const zir = if (file.status == .success_zir) file.zir.? else file.prev_zir.?.*; - const decl = zir.getDeclaration(inst_info.inst); + const decl = file.zir.?.getDeclaration(inst_info.inst); if (decl.linkage == .@"export") { const unit: AnalUnit = .wrap(.{ .nav_val = nav }); if (!result.contains(unit)) { @@ -3712,9 +3703,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv // These are named declarations. They are analyzed only if marked `export`. const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); - // If the file failed AstGen, the TrackedInst refers to the old ZIR. - const zir = if (file.status == .success_zir) file.zir.? 
else file.prev_zir.?.*; - const decl = zir.getDeclaration(inst_info.inst); + const decl = file.zir.?.getDeclaration(inst_info.inst); if (decl.linkage == .@"export") { const unit: AnalUnit = .wrap(.{ .nav_val = nav }); if (!result.contains(unit)) { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index c18145180d56..cc323c4ac04b 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -109,7 +109,7 @@ pub fn astGenFile( break :lock .shared; }, - .astgen_failure, .success_zir => lock: { + .astgen_failure, .success => lock: { const unchanged_metadata = stat.size == file.stat.size and stat.mtime == file.stat.mtime and @@ -214,8 +214,7 @@ pub fn astGenFile( .inode = header.stat_inode, .mtime = header.stat_mtime, }; - file.prev_status = file.status; - file.status = .success_zir; + file.status = .success; log.debug("AstGen cached success: {s}", .{file.sub_file_path}); if (file.zir.?.hasCompileErrors()) { @@ -248,19 +247,11 @@ pub fn astGenFile( pt.lockAndClearFileCompileError(file); - // Previous ZIR is kept for two reasons: - // - // 1. In case an update to the file causes a Parse or AstGen failure, we - // need to compare two successful ZIR files in order to proceed with an - // incremental update. This avoids needlessly tossing out semantic - // analysis work when an error is temporarily introduced. - // - // 2. In order to detect updates, we need to iterate over the intern pool - // values while comparing old ZIR to new ZIR. This is better done in a - // single-threaded context, so we need to keep both versions around - // until that point in the pipeline. Previous ZIR data is freed after - // that. - if (file.zir != null and !file.zir.?.loweringFailed()) { + // If `zir` is not null, and `prev_zir` is null, then `TrackedInst`s are associated with `zir`. + // We need to keep it around! 
+ // As an optimization, also check `loweringFailed`; if true, but `prev_zir == null`, then this + // file has never passed AstGen, so we actually need not cache the old ZIR. + if (file.zir != null and file.prev_zir == null and !file.zir.?.loweringFailed()) { assert(file.prev_zir == null); const prev_zir_ptr = try gpa.create(Zir); file.prev_zir = prev_zir_ptr; @@ -289,8 +280,7 @@ pub fn astGenFile( // Any potential AST errors are converted to ZIR errors here. file.zir = try AstGen.generate(gpa, file.tree.?); - file.prev_status = file.status; - file.status = .success_zir; + file.status = .success; log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); const safety_buffer = if (Zcu.data_has_safety_tag) @@ -383,9 +373,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { defer cleanupUpdatedFiles(gpa, &updated_files); for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); - if (file.prev_status != file.status and file.prev_status != .never_loaded) { - try zcu.markDependeeOutdated(.not_marked_po, .{ .file = file_index }); - } + assert(file.status == .success); const old_zir = file.prev_zir orelse continue; const new_zir = file.zir.?; const gop = try updated_files.getOrPut(gpa, file_index); @@ -394,9 +382,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { .file = file, .inst_map = .{}, }; - if (!new_zir.loweringFailed()) { - try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, &gop.value_ptr.inst_map); - } + try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, &gop.value_ptr.inst_map); } if (updated_files.count() == 0) @@ -416,13 +402,9 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { .index = @intCast(tracked_inst_unwrapped_index), }).wrap(ip); const new_inst = updated_file.inst_map.get(old_inst) orelse { - // Tracking failed for this instruction. - // This may be due to changes in the ZIR, or AstGen might have failed due to a very broken file. 
- // Either way, invalidate associated `src_hash` deps. - log.debug("tracking failed for %{d}{s}", .{ - old_inst, - if (file.zir.?.loweringFailed()) " due to AstGen failure" else "", - }); + // Tracking failed for this instruction due to changes in the ZIR. + // Invalidate associated `src_hash` deps. + log.debug("tracking failed for %{d}", .{old_inst}); tracked_inst.inst = .lost; try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index }); continue; @@ -527,23 +509,19 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { for (updated_files.keys(), updated_files.values()) |file_index, updated_file| { const file = updated_file.file; - if (file.zir.?.loweringFailed()) { - // Keep `prev_zir` around: it's the last usable ZIR. - // Don't update the namespace, as we have no new data to update *to*. - } else { - const prev_zir = file.prev_zir.?; - file.prev_zir = null; - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); - - // For every file which has changed, re-scan the namespace of the file's root struct type. - // These types are special-cased because they don't have an enclosing declaration which will - // be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this - // now because this work is fast (no actual Sema work is happening, we're just updating the - // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace` - // will track some instructions. - try pt.updateFileNamespace(file_index); - } + + const prev_zir = file.prev_zir.?; + file.prev_zir = null; + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + + // For every file which has changed, re-scan the namespace of the file's root struct type. + // These types are special-cased because they don't have an enclosing declaration which will + // be re-analyzed (causing the struct's namespace to be re-scanned). 
It's fine to do this + // now because this work is fast (no actual Sema work is happening, we're just updating the + // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace` + // will track some instructions. + try pt.updateFileNamespace(file_index); } } @@ -745,6 +723,7 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU kv.value.destroy(gpa); } _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); + zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); } } else { // We can trust the current information about this unit. @@ -796,15 +775,8 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu const inst_resolved = comptime_unit.zir_index.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_resolved.file); - // TODO: stop the compiler ever reaching Sema if there are failed files. That way, this check is - // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends - // in `ensureComptimeUnitUpToDate`. - if (file.status != .success_zir) return error.AnalysisFail; const zir = file.zir.?; - // We are about to re-analyze this unit; drop its depenndencies. - zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); - try zcu.analysis_in_progress.put(gpa, anal_unit, {}); defer assert(zcu.analysis_in_progress.swapRemove(anal_unit)); @@ -923,6 +895,7 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu kv.value.destroy(gpa); } _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); + ip.removeDependenciesForDepender(gpa, anal_unit); } else { // We can trust the current information about this unit. 
if (prev_failed) return error.AnalysisFail; @@ -993,15 +966,8 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_resolved.file); - // TODO: stop the compiler ever reaching Sema if there are failed files. That way, this check is - // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends - // in `ensureComptimeUnitUpToDate`. - if (file.status != .success_zir) return error.AnalysisFail; const zir = file.zir.?; - // We are about to re-analyze this unit; drop its depenndencies. - zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); - try zcu.analysis_in_progress.put(gpa, anal_unit, {}); errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit); @@ -1301,6 +1267,7 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc kv.value.destroy(gpa); } _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); + ip.removeDependenciesForDepender(gpa, anal_unit); } else { // We can trust the current information about this unit. if (prev_failed) return error.AnalysisFail; @@ -1371,15 +1338,8 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_resolved.file); - // TODO: stop the compiler ever reaching Sema if there are failed files. That way, this check is - // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends - // in `ensureComptimeUnitUpToDate`. - if (file.status != .success_zir) return error.AnalysisFail; const zir = file.zir.?; - // We are about to re-analyze this unit; drop its depenndencies. 
- zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); - try zcu.analysis_in_progress.put(gpa, anal_unit, {}); defer _ = zcu.analysis_in_progress.swapRemove(anal_unit); @@ -1828,7 +1788,6 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator. const zcu = pt.zcu; const file = zcu.fileByIndex(file_index); - assert(file.status == .success_zir); const file_root_type = zcu.fileRootType(file_index); if (file_root_type == .none) return; @@ -1865,9 +1824,6 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { assert(file.getMode() == .zig); assert(zcu.fileRootType(file_index) == .none); - if (file.status != .success_zir) { - return error.AnalysisFail; - } assert(file.zir != null); const new_namespace_index = try pt.createNamespace(.{ @@ -1910,7 +1866,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { } } -pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { +pub fn importPkg(pt: Zcu.PerThread, mod: *Module) Allocator.Error!Zcu.ImportFileResult { const zcu = pt.zcu; const gpa = zcu.gpa; @@ -1984,7 +1940,6 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { .zir = null, .zoir = null, .status = .never_loaded, - .prev_status = .never_loaded, .mod = mod, }; @@ -1997,13 +1952,19 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { }; } -/// Called from a worker thread during AstGen. +/// Called from a worker thread during AstGen (with the Compilation mutex held). /// Also called from Sema during semantic analysis. +/// Does not attempt to load the file from disk; just returns a corresponding `*Zcu.File`. 
pub fn importFile( pt: Zcu.PerThread, cur_file: *Zcu.File, import_string: []const u8, -) !Zcu.ImportFileResult { +) error{ + OutOfMemory, + ModuleNotFound, + ImportOutsideModulePath, + CurrentWorkingDirectoryUnlinked, +}!Zcu.ImportFileResult { const zcu = pt.zcu; const mod = cur_file.mod; @@ -2061,7 +2022,10 @@ pub fn importFile( defer gpa.free(resolved_root_path); const sub_file_path = p: { - const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path); + const relative = std.fs.path.relative(gpa, resolved_root_path, resolved_path) catch |err| switch (err) { + error.Unexpected => unreachable, + else => |e| return e, + }; errdefer gpa.free(relative); if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) { @@ -2089,13 +2053,15 @@ pub fn importFile( gop.value_ptr.* = new_file_index; new_file.* = .{ .sub_file_path = sub_file_path, + + .status = .never_loaded, .stat = undefined, + .source = null, .tree = null, .zir = null, .zoir = null, - .status = .never_loaded, - .prev_status = .never_loaded, + .mod = mod, }; @@ -2835,7 +2801,7 @@ pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Err /// `file.zir` must be unchanged from the last update, as it is used to determine if there is such an entry. fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void { const zir = file.zir orelse return; - if (zir.hasCompileErrors()) return; + if (!zir.hasCompileErrors()) return; pt.zcu.comp.mutex.lock(); defer pt.zcu.comp.mutex.unlock(); @@ -3196,6 +3162,7 @@ pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Inde } } +/// Sets `File.status` of `file_index` to `retryable_failure`, and stores an error in `pt.zcu.failed_files`. 
pub fn reportRetryableAstGenError( pt: Zcu.PerThread, src: Zcu.AstGenSrc, @@ -3231,13 +3198,18 @@ pub fn reportRetryableAstGenError( }); errdefer err_msg.destroy(gpa); - { - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, err_msg); + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + const gop = try zcu.failed_files.getOrPut(gpa, file); + if (gop.found_existing) { + if (gop.value_ptr.*) |old_err_msg| { + old_err_msg.destroy(gpa); + } } + gop.value_ptr.* = err_msg; } +/// Sets `File.status` of `file_index` to `retryable_failure`, and stores an error in `pt.zcu.failed_files`. pub fn reportRetryableFileError( pt: Zcu.PerThread, file_index: Zcu.File.Index, @@ -3771,7 +3743,6 @@ fn recreateStructType( const inst_info = key.zir_index.resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); - assert(file.status == .success_zir); // otherwise inst tracking failed const zir = file.zir.?; assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); @@ -3844,7 +3815,6 @@ fn recreateUnionType( const inst_info = key.zir_index.resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); - assert(file.status == .success_zir); // otherwise inst tracking failed const zir = file.zir.?; assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); @@ -3931,7 +3901,6 @@ fn recreateEnumType( const inst_info = key.zir_index.resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); - assert(file.status == .success_zir); // otherwise inst tracking failed const zir = file.zir.?; assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); @@ -4075,7 +4044,6 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); - if (file.status != .success_zir) return error.AnalysisFail; const zir = file.zir.?; 
assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); diff --git a/src/main.zig b/src/main.zig index 8d30bef23767..daf6010dcc51 100644 --- a/src/main.zig +++ b/src/main.zig @@ -6134,7 +6134,6 @@ fn cmdAstCheck( var file: Zcu.File = .{ .status = .never_loaded, - .prev_status = .never_loaded, .sub_file_path = undefined, .stat = undefined, .source = null, @@ -6512,7 +6511,6 @@ fn cmdDumpZir( var file: Zcu.File = .{ .status = .never_loaded, - .prev_status = .never_loaded, .sub_file_path = undefined, .stat = undefined, .source = null, @@ -6578,7 +6576,6 @@ fn cmdChangelist( var file: Zcu.File = .{ .status = .never_loaded, - .prev_status = .never_loaded, .sub_file_path = old_source_file, .stat = .{ .size = stat.size, From 0907432fffd2d9f5319022332a60fc88d3aacf4e Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 4 Feb 2025 12:00:34 +0000 Subject: [PATCH 4/9] compiler: a few renames This is mainly in preparation for integrating ZonGen into the pipeline properly, although these names are better because `astGenFile` isn't *necessarily* running AstGen; it may determine that the current ZIR is up-to-date, or load cached ZIR. --- src/Compilation.zig | 12 ++++++------ src/Sema.zig | 2 +- src/Zcu.zig | 2 +- src/Zcu/PerThread.zig | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 28017eeed9a4..f585fd7e9823 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3779,7 +3779,7 @@ fn performAllTheWorkInner( // will be needed by the worker threads. 
const path_digest = zcu.filePathDigest(file_index); const file = zcu.fileByIndex(file_index); - comp.thread_pool.spawnWgId(&astgen_wait_group, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateFile, .{ comp, file, file_index, path_digest, zir_prog_node, &astgen_wait_group, .root, }); } @@ -3787,7 +3787,7 @@ fn performAllTheWorkInner( for (0.., zcu.embed_table.values()) |ef_index_usize, ef| { const ef_index: Zcu.EmbedFile.Index = @enumFromInt(ef_index_usize); - comp.thread_pool.spawnWgId(&astgen_wait_group, workerCheckEmbedFile, .{ + comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateEmbedFile, .{ comp, ef_index, ef, }); } @@ -4280,7 +4280,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye }; } -fn workerAstGenFile( +fn workerUpdateFile( tid: usize, comp: *Compilation, file: *Zcu.File, @@ -4296,7 +4296,7 @@ fn workerAstGenFile( const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid)); defer pt.deactivate(); - pt.astGenFile(file, path_digest) catch |err| switch (err) { + pt.updateFile(file, path_digest) catch |err| switch (err) { error.AnalysisFail => return, else => { pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) { @@ -4352,7 +4352,7 @@ fn workerAstGenFile( .importing_file = file_index, .import_tok = item.data.token, } }; - comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(wg, workerUpdateFile, .{ comp, import_result.file, import_result.file_index, imported_path_digest, prog_node, wg, sub_src, }); } @@ -4375,7 +4375,7 @@ fn workerUpdateBuiltinZigFile( }; } -fn workerCheckEmbedFile(tid: usize, comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void { +fn workerUpdateEmbedFile(tid: usize, comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void { comp.detectEmbedFileUpdate(@enumFromInt(tid), ef_index, ef) catch |err| switch (err) { error.OutOfMemory => { comp.mutex.lock(); diff --git 
a/src/Sema.zig b/src/Sema.zig index 19252600f0ef..97b9fbbaf919 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6140,7 +6140,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); const path_digest = zcu.filePathDigest(result.file_index); - pt.astGenFile(result.file, path_digest) catch |err| + pt.updateFile(result.file, path_digest) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); try sema.declareDependency(.{ .file = result.file_index }); diff --git a/src/Zcu.zig b/src/Zcu.zig index 0ce43ce78d87..f0262a146236 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -785,7 +785,7 @@ pub const File = struct { // Here we do not modify stat fields because this function is the one // used for error reporting. We need to keep the stat fields stale so that - // astGenFile can know to regenerate ZIR. + // updateFile can know to regenerate ZIR. file.source = source; errdefer comptime unreachable; // don't error after populating `source` diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index cc323c4ac04b..d45092ad7dfc 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -73,7 +73,7 @@ pub fn destroyFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void { if (!is_builtin) gpa.destroy(file); } -pub fn astGenFile( +pub fn updateFile( pt: Zcu.PerThread, file: *Zcu.File, path_digest: Cache.BinDigest, From 55a2e535fdb663793b84769cb6c3a261bda3fc66 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 4 Feb 2025 14:03:40 +0000 Subject: [PATCH 5/9] compiler: integrate ZON with the ZIR caching system This came with a big cleanup to `Zcu.PerThread.updateFile` (formerly `astGenFile`). Also, change how the cache manifest works for files in the import table. 
Instead of being added to the manifest when we call `semaFile` on them, we iterate the import table after running the AstGen workers and add all the files to the cache manifest then. The downside is that this is a bit more eager to include files in the manifest; in particular, files which are imported but not actually referenced are now included in analysis. So, for instance, modifying any standard library file will invalidate all Zig compilations using that standard library, even if they don't use that file. The original motivation here was simply that the old logic in `semaFile` didn't translate nicely to ZON. However, it turns out to actually be necessary for correctness. Because `@import("foo.zig")` is an AstGen-level error if `foo.zig` does not exist, we need to invalidate the cache when an imported but unreferenced file is removed to make sure this error is triggered when it needs to be. Resolves: #22746 --- lib/std/zig/Zoir.zig | 25 ++++ src/Compilation.zig | 46 +++++- src/Sema.zig | 6 - src/Sema/LowerZon.zig | 2 - src/Zcu.zig | 183 +++++++++++++++++++++++ src/Zcu/PerThread.zig | 330 ++++++++++++++++++++---------------------- 6 files changed, 405 insertions(+), 187 deletions(-) diff --git a/lib/std/zig/Zoir.zig b/lib/std/zig/Zoir.zig index af93d03261ff..700bf6ea32d8 100644 --- a/lib/std/zig/Zoir.zig +++ b/lib/std/zig/Zoir.zig @@ -10,6 +10,31 @@ string_bytes: []u8, compile_errors: []Zoir.CompileError, error_notes: []Zoir.CompileError.Note, +/// The data stored at byte offset 0 when ZOIR is stored in a file. +pub const Header = extern struct { + nodes_len: u32, + extra_len: u32, + limbs_len: u32, + string_bytes_len: u32, + compile_errors_len: u32, + error_notes_len: u32, + + /// We could leave this as padding, however it triggers a Valgrind warning because + /// we read and write undefined bytes to the file system. 
This is harmless, but + /// it's essentially free to have a zero field here and makes the warning go away, + /// making it more likely that following Valgrind warnings will be taken seriously. + unused: u64 = 0, + + stat_inode: std.fs.File.INode, + stat_size: u64, + stat_mtime: i128, + + comptime { + // Check that `unused` is working as expected + assert(std.meta.hasUniqueRepresentation(Header)); + } +}; + pub fn hasCompileErrors(zoir: Zoir) bool { if (zoir.compile_errors.len > 0) { assert(zoir.nodes.len == 0); diff --git a/src/Compilation.zig b/src/Compilation.zig index f585fd7e9823..93ce5ba6bee5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2220,10 +2220,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { try comp.astgen_work_queue.ensureUnusedCapacity(zcu.import_table.count()); for (zcu.import_table.values()) |file_index| { if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue; - const file = zcu.fileByIndex(file_index); - if (file.getMode() == .zig) { - comp.astgen_work_queue.writeItemAssumeCapacity(file_index); - } + comp.astgen_work_queue.writeItemAssumeCapacity(file_index); } if (comp.file_system_inputs) |fsi| { for (zcu.import_table.values()) |file_index| { @@ -3810,11 +3807,40 @@ fn performAllTheWorkInner( const pt: Zcu.PerThread = .activate(zcu, .main); defer pt.deactivate(); + // If the cache mode is `whole`, then add every source file to the cache manifest. 
+ switch (comp.cache_use) { + .whole => |whole| if (whole.cache_manifest) |man| { + const gpa = zcu.gpa; + for (zcu.import_table.values()) |file_index| { + const file = zcu.fileByIndex(file_index); + const source = file.getSource(gpa) catch |err| { + try pt.reportRetryableFileError(file_index, "unable to load source: {s}", .{@errorName(err)}); + continue; + }; + const resolved_path = try std.fs.path.resolve(gpa, &.{ + file.mod.root.root_dir.path orelse ".", + file.mod.root.sub_path, + file.sub_file_path, + }); + errdefer gpa.free(resolved_path); + whole.cache_manifest_mutex.lock(); + defer whole.cache_manifest_mutex.unlock(); + man.addFilePostContents(resolved_path, source.bytes, source.stat) catch |err| switch (err) { + error.OutOfMemory => |e| return e, + else => { + try pt.reportRetryableFileError(file_index, "unable to update cache: {s}", .{@errorName(err)}); + continue; + }, + }; + } + }, + .incremental => {}, + } + try reportMultiModuleErrors(pt); const any_fatal_files = for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); - if (file.getMode() == .zon) continue; switch (file.status) { .never_loaded => unreachable, // everything is loaded by the workers .retryable_failure, .astgen_failure => break true, @@ -3822,7 +3848,7 @@ fn performAllTheWorkInner( } } else false; - if (any_fatal_files) { + if (any_fatal_files or comp.alloc_failure_occurred) { // We give up right now! No updating of ZIR refs, no nothing. The idea is that this prevents // us from invalidating lots of incremental dependencies due to files with e.g. parse errors. // However, this means our analysis data is invalid, so we want to omit all analysis errors. 
@@ -4290,7 +4316,6 @@ fn workerUpdateFile( wg: *WaitGroup, src: Zcu.AstGenSrc, ) void { - assert(file.getMode() == .zig); const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); @@ -4310,6 +4335,11 @@ fn workerUpdateFile( }, }; + switch (file.getMode()) { + .zig => {}, // continue to logic below + .zon => return, // ZON can't import anything so we're done + } + // Pre-emptively look for `@import` paths and queue them up. // If we experience an error preemptively fetching the // file, just ignore it and let it happen again later during Sema. @@ -4344,7 +4374,7 @@ fn workerUpdateFile( const imported_path_digest = pt.zcu.filePathDigest(res.file_index); break :blk .{ res, imported_path_digest }; }; - if (import_result.is_new and import_result.file.getMode() == .zig) { + if (import_result.is_new) { log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{ file.sub_file_path, import_path, import_result.file.sub_file_path, }); diff --git a/src/Sema.zig b/src/Sema.zig index 97b9fbbaf919..068d28209a6a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -13994,12 +13994,6 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return Air.internedToRef(ty); }, .zon => { - _ = result.file.getTree(zcu.gpa) catch |err| { - // TODO: these errors are file system errors; make sure an update() will - // retry this and not cache the file system error, which may be transient. 
- return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ result.file.sub_file_path, @errorName(err) }); - }; - if (extra.res_ty == .none) { return sema.fail(block, operand_src, "'@import' of ZON must have a known result type", .{}); } diff --git a/src/Sema/LowerZon.zig b/src/Sema/LowerZon.zig index 2b2d16b90a09..f30879090b87 100644 --- a/src/Sema/LowerZon.zig +++ b/src/Sema/LowerZon.zig @@ -39,8 +39,6 @@ pub fn run( ) CompileError!InternPool.Index { const pt = sema.pt; - _ = try file.getZoir(pt.zcu); - const tracked_inst = try pt.zcu.intern_pool.trackZir(pt.zcu.gpa, pt.tid, .{ .file = file_index, .inst = .main_struct_inst, // this is the only trackable instruction in a ZON file diff --git a/src/Zcu.zig b/src/Zcu.zig index f0262a146236..d94cfc10d6a1 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2643,6 +2643,189 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F return zir; } +pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.Stat, zir: Zir) (std.fs.File.WriteError || Allocator.Error)!void { + const safety_buffer = if (data_has_safety_tag) + try gpa.alloc([8]u8, zir.instructions.len) + else + undefined; + defer if (data_has_safety_tag) gpa.free(safety_buffer); + + const data_ptr: [*]const u8 = if (data_has_safety_tag) + if (zir.instructions.len == 0) + undefined + else + @ptrCast(safety_buffer.ptr) + else + @ptrCast(zir.instructions.items(.data).ptr); + + if (data_has_safety_tag) { + // The `Data` union has a safety tag but in the file format we store it without. + for (zir.instructions.items(.data), 0..) 
|*data, i| { + const as_struct: *const HackDataLayout = @ptrCast(data); + safety_buffer[i] = as_struct.data; + } + } + + const header: Zir.Header = .{ + .instructions_len = @intCast(zir.instructions.len), + .string_bytes_len = @intCast(zir.string_bytes.len), + .extra_len = @intCast(zir.extra.len), + + .stat_size = stat.size, + .stat_inode = stat.inode, + .stat_mtime = stat.mtime, + }; + var iovecs: [5]std.posix.iovec_const = .{ + .{ + .base = @ptrCast(&header), + .len = @sizeOf(Zir.Header), + }, + .{ + .base = @ptrCast(zir.instructions.items(.tag).ptr), + .len = zir.instructions.len, + }, + .{ + .base = data_ptr, + .len = zir.instructions.len * 8, + }, + .{ + .base = zir.string_bytes.ptr, + .len = zir.string_bytes.len, + }, + .{ + .base = @ptrCast(zir.extra.ptr), + .len = zir.extra.len * 4, + }, + }; + try cache_file.writevAll(&iovecs); +} + +pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void { + const header: Zoir.Header = .{ + .nodes_len = @intCast(zoir.nodes.len), + .extra_len = @intCast(zoir.extra.len), + .limbs_len = @intCast(zoir.limbs.len), + .string_bytes_len = @intCast(zoir.string_bytes.len), + .compile_errors_len = @intCast(zoir.compile_errors.len), + .error_notes_len = @intCast(zoir.error_notes.len), + + .stat_size = stat.size, + .stat_inode = stat.inode, + .stat_mtime = stat.mtime, + }; + var iovecs: [9]std.posix.iovec_const = .{ + .{ + .base = @ptrCast(&header), + .len = @sizeOf(Zoir.Header), + }, + .{ + .base = @ptrCast(zoir.nodes.items(.tag)), + .len = zoir.nodes.len * @sizeOf(Zoir.Node.Repr.Tag), + }, + .{ + .base = @ptrCast(zoir.nodes.items(.data)), + .len = zoir.nodes.len * 4, + }, + .{ + .base = @ptrCast(zoir.nodes.items(.ast_node)), + .len = zoir.nodes.len * 4, + }, + .{ + .base = @ptrCast(zoir.extra), + .len = zoir.extra.len * 4, + }, + .{ + .base = @ptrCast(zoir.limbs), + .len = zoir.limbs.len * 4, + }, + .{ + .base = zoir.string_bytes.ptr, + .len = zoir.string_bytes.len, + }, + .{ + 
.base = @ptrCast(zoir.compile_errors), + .len = zoir.compile_errors.len * @sizeOf(Zoir.CompileError), + }, + .{ + .base = @ptrCast(zoir.error_notes), + .len = zoir.error_notes.len * @sizeOf(Zoir.CompileError.Note), + }, + }; + try cache_file.writevAll(&iovecs); +} + +pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_file: std.fs.File) !Zoir { + var zoir: Zoir = .{ + .nodes = .empty, + .extra = &.{}, + .limbs = &.{}, + .string_bytes = &.{}, + .compile_errors = &.{}, + .error_notes = &.{}, + }; + errdefer zoir.deinit(gpa); + + zoir.nodes = nodes: { + var nodes: std.MultiArrayList(Zoir.Node.Repr) = .empty; + defer nodes.deinit(gpa); + try nodes.setCapacity(gpa, header.nodes_len); + nodes.len = header.nodes_len; + break :nodes nodes.toOwnedSlice(); + }; + + zoir.extra = try gpa.alloc(u32, header.extra_len); + zoir.limbs = try gpa.alloc(std.math.big.Limb, header.limbs_len); + zoir.string_bytes = try gpa.alloc(u8, header.string_bytes_len); + + zoir.compile_errors = try gpa.alloc(Zoir.CompileError, header.compile_errors_len); + zoir.error_notes = try gpa.alloc(Zoir.CompileError.Note, header.error_notes_len); + + var iovecs: [8]std.posix.iovec = .{ + .{ + .base = @ptrCast(zoir.nodes.items(.tag)), + .len = header.nodes_len * @sizeOf(Zoir.Node.Repr.Tag), + }, + .{ + .base = @ptrCast(zoir.nodes.items(.data)), + .len = header.nodes_len * 4, + }, + .{ + .base = @ptrCast(zoir.nodes.items(.ast_node)), + .len = header.nodes_len * 4, + }, + .{ + .base = @ptrCast(zoir.extra), + .len = header.extra_len * 4, + }, + .{ + .base = @ptrCast(zoir.limbs), + .len = header.limbs_len * @sizeOf(std.math.big.Limb), + }, + .{ + .base = zoir.string_bytes.ptr, + .len = header.string_bytes_len, + }, + .{ + .base = @ptrCast(zoir.compile_errors), + .len = header.compile_errors_len * @sizeOf(Zoir.CompileError), + }, + .{ + .base = @ptrCast(zoir.error_notes), + .len = header.error_notes_len * @sizeOf(Zoir.CompileError.Note), + }, + }; + + const bytes_expected = expected: { + var n: 
usize = 0; + for (iovecs) |v| n += v.len; + break :expected n; + }; + + const bytes_read = try cache_file.readvAll(&iovecs); + if (bytes_read != bytes_expected) return error.UnexpectedFileSize; + return zoir; +} + pub fn markDependeeOutdated( zcu: *Zcu, /// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependencies as PO. diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index d45092ad7dfc..18ec135ab2bb 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -26,6 +26,8 @@ const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Zcu = @import("../Zcu.zig"); const Zir = std.zig.Zir; +const Zoir = std.zig.Zoir; +const ZonGen = std.zig.ZonGen; zcu: *Zcu, @@ -73,6 +75,8 @@ pub fn destroyFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void { if (!is_builtin) gpa.destroy(file); } +/// Ensures that `file` has up-to-date ZIR. If not, loads the ZIR cache or runs +/// AstGen as needed. Also updates `file.status`. pub fn updateFile( pt: Zcu.PerThread, file: *Zcu.File, @@ -126,6 +130,24 @@ pub fn updateFile( }, }; + // The old compile error, if any, is no longer relevant. + pt.lockAndClearFileCompileError(file); + + // If `zir` is not null, and `prev_zir` is null, then `TrackedInst`s are associated with `zir`. + // We need to keep it around! + // As an optimization, also check `loweringFailed`; if true, but `prev_zir == null`, then this + // file has never passed AstGen, so we actually need not cache the old ZIR. + if (file.zir != null and file.prev_zir == null and !file.zir.?.loweringFailed()) { + assert(file.prev_zir == null); + const prev_zir_ptr = try gpa.create(Zir); + file.prev_zir = prev_zir_ptr; + prev_zir_ptr.* = file.zir.?; + file.zir = null; + } + + // We're going to re-load everything, so unload source, AST, ZIR, ZOIR. + file.unload(gpa); + // We ask for a lock in order to coordinate with other zig processes. 
// If another process is already working on this file, we will get the cached // version. Likewise if we're working on AstGen and another process asks for @@ -180,175 +202,164 @@ pub fn updateFile( }; defer cache_file.close(); - while (true) { - update: { - // First we read the header to determine the lengths of arrays. - const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) { - // This can happen if Zig bails out of this function between creating - // the cached file and writing it. - error.EndOfStream => break :update, - else => |e| return e, - }; - const unchanged_metadata = - stat.size == header.stat_size and - stat.mtime == header.stat_mtime and - stat.inode == header.stat_inode; - - if (!unchanged_metadata) { - log.debug("AstGen cache stale: {s}", .{file.sub_file_path}); - break :update; - } - log.debug("AstGen cache hit: {s} instructions_len={d}", .{ - file.sub_file_path, header.instructions_len, - }); - - file.zir = Zcu.loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) { - error.UnexpectedFileSize => { - log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path}); - break :update; - }, - else => |e| return e, - }; - file.stat = .{ - .size = header.stat_size, - .inode = header.stat_inode, - .mtime = header.stat_mtime, - }; - file.status = .success; - log.debug("AstGen cached success: {s}", .{file.sub_file_path}); - - if (file.zir.?.hasCompileErrors()) { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); - } - if (file.zir.?.loweringFailed()) { - file.status = .astgen_failure; - return error.AnalysisFail; - } - return; + const need_update = while (true) { + const result = switch (file.getMode()) { + inline else => |mode| try loadZirZoirCache(zcu, cache_file, stat, file, mode), + }; + switch (result) { + .success => { + log.debug("AstGen cached success: {s}", .{file.sub_file_path}); + break false; + }, + .invalid => {}, + .truncated => 
log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path}), + .stale => log.debug("AstGen cache stale: {s}", .{file.sub_file_path}), } // If we already have the exclusive lock then it is our job to update. - if (builtin.os.tag == .wasi or lock == .exclusive) break; + if (builtin.os.tag == .wasi or lock == .exclusive) break true; // Otherwise, unlock to give someone a chance to get the exclusive lock // and then upgrade to an exclusive lock. cache_file.unlock(); lock = .exclusive; try cache_file.lock(lock); - } + }; - // The cache is definitely stale so delete the contents to avoid an underwrite later. - cache_file.setEndPos(0) catch |err| switch (err) { - error.FileTooBig => unreachable, // 0 is not too big + if (need_update) { + // The cache is definitely stale so delete the contents to avoid an underwrite later. + cache_file.setEndPos(0) catch |err| switch (err) { + error.FileTooBig => unreachable, // 0 is not too big + else => |e| return e, + }; - else => |e| return e, - }; + if (stat.size > std.math.maxInt(u32)) + return error.FileTooBig; - pt.lockAndClearFileCompileError(file); + const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); + defer if (file.source == null) gpa.free(source); + const amt = try source_file.readAll(source); + if (amt != stat.size) + return error.UnexpectedEndOfFile; - // If `zir` is not null, and `prev_zir` is null, then `TrackedInst`s are associated with `zir`. - // We need to keep it around! - // As an optimization, also check `loweringFailed`; if true, but `prev_zir == null`, then this - // file has never passed AstGen, so we actually need not cache the old ZIR. 
- if (file.zir != null and file.prev_zir == null and !file.zir.?.loweringFailed()) { - assert(file.prev_zir == null); - const prev_zir_ptr = try gpa.create(Zir); - file.prev_zir = prev_zir_ptr; - prev_zir_ptr.* = file.zir.?; - file.zir = null; - } - file.unload(gpa); + file.source = source; + + // Any potential AST errors are converted to ZIR errors when we run AstGen/ZonGen. + file.tree = try Ast.parse(gpa, source, file.getMode()); - if (stat.size > std.math.maxInt(u32)) - return error.FileTooBig; + switch (file.getMode()) { + .zig => { + file.zir = try AstGen.generate(gpa, file.tree.?); + Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) catch |err| switch (err) { + error.OutOfMemory => |e| return e, + else => log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{ + file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), + }), + }; + }, + .zon => { + file.zoir = try ZonGen.generate(gpa, file.tree.?, .{}); + Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| { + log.warn("unable to write cached ZOIR code for {}{s} to {}{s}: {s}", .{ + file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), + }); + }; + }, + } - const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); - defer if (file.source == null) gpa.free(source); - const amt = try source_file.readAll(source); - if (amt != stat.size) - return error.UnexpectedEndOfFile; + log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); + } file.stat = .{ .size = stat.size, .inode = stat.inode, .mtime = stat.mtime, }; - file.source = source; - file.tree = try Ast.parse(gpa, source, .zig); + // Now, `zir` or `zoir` is definitely populated and up-to-date. + // Mark file successes/failures as needed. - // Any potential AST errors are converted to ZIR errors here. 
- file.zir = try AstGen.generate(gpa, file.tree.?); - file.status = .success; - log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); + switch (file.getMode()) { + .zig => { + if (file.zir.?.hasCompileErrors()) { + comp.mutex.lock(); + defer comp.mutex.unlock(); + try zcu.failed_files.putNoClobber(gpa, file, null); + } + if (file.zir.?.loweringFailed()) { + file.status = .astgen_failure; + } else { + file.status = .success; + } + }, + .zon => { + if (file.zoir.?.hasCompileErrors()) { + file.status = .astgen_failure; + comp.mutex.lock(); + defer comp.mutex.unlock(); + try zcu.failed_files.putNoClobber(gpa, file, null); + } else { + file.status = .success; + } + }, + } - const safety_buffer = if (Zcu.data_has_safety_tag) - try gpa.alloc([8]u8, file.zir.?.instructions.len) - else - undefined; - defer if (Zcu.data_has_safety_tag) gpa.free(safety_buffer); - const data_ptr = if (Zcu.data_has_safety_tag) - if (file.zir.?.instructions.len == 0) - @as([*]const u8, undefined) - else - @as([*]const u8, @ptrCast(safety_buffer.ptr)) - else - @as([*]const u8, @ptrCast(file.zir.?.instructions.items(.data).ptr)); - if (Zcu.data_has_safety_tag) { - // The `Data` union has a safety tag but in the file format we store it without. - for (file.zir.?.instructions.items(.data), 0..) 
|*data, i| { - const as_struct: *const Zcu.HackDataLayout = @ptrCast(data); - safety_buffer[i] = as_struct.data; - } + switch (file.status) { + .never_loaded => unreachable, + .retryable_failure => unreachable, + .astgen_failure => return error.AnalysisFail, + .success => return, } +} - const header: Zir.Header = .{ - .instructions_len = @as(u32, @intCast(file.zir.?.instructions.len)), - .string_bytes_len = @as(u32, @intCast(file.zir.?.string_bytes.len)), - .extra_len = @as(u32, @intCast(file.zir.?.extra.len)), +fn loadZirZoirCache( + zcu: *Zcu, + cache_file: std.fs.File, + stat: std.fs.File.Stat, + file: *Zcu.File, + comptime mode: Ast.Mode, +) !enum { success, invalid, truncated, stale } { + assert(file.getMode() == mode); - .stat_size = stat.size, - .stat_inode = stat.inode, - .stat_mtime = stat.mtime, - }; - var iovecs = [_]std.posix.iovec_const{ - .{ - .base = @as([*]const u8, @ptrCast(&header)), - .len = @sizeOf(Zir.Header), - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.?.instructions.items(.tag).ptr)), - .len = file.zir.?.instructions.len, - }, - .{ - .base = data_ptr, - .len = file.zir.?.instructions.len * 8, - }, - .{ - .base = file.zir.?.string_bytes.ptr, - .len = file.zir.?.string_bytes.len, - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.?.extra.ptr)), - .len = file.zir.?.extra.len * 4, - }, + const gpa = zcu.gpa; + + const Header = switch (mode) { + .zig => Zir.Header, + .zon => Zoir.Header, }; - cache_file.writevAll(&iovecs) catch |err| { - log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{ - file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), - }); + + // First we read the header to determine the lengths of arrays. + const header = cache_file.reader().readStruct(Header) catch |err| switch (err) { + // This can happen if Zig bails out of this function between creating + // the cached file and writing it. 
+ error.EndOfStream => return .invalid, + else => |e| return e, }; - if (file.zir.?.hasCompileErrors()) { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); + const unchanged_metadata = + stat.size == header.stat_size and + stat.mtime == header.stat_mtime and + stat.inode == header.stat_inode; + + if (!unchanged_metadata) { + return .stale; } - if (file.zir.?.loweringFailed()) { - file.status = .astgen_failure; - return error.AnalysisFail; + + switch (mode) { + .zig => { + file.zir = Zcu.loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) { + error.UnexpectedFileSize => return .truncated, + else => |e| return e, + }; + }, + .zon => { + file.zoir = Zcu.loadZoirCacheBody(gpa, header, cache_file) catch |err| switch (err) { + error.UnexpectedFileSize => return .truncated, + else => |e| return e, + }; + }, } + + return .success; } const UpdatedFile = struct { @@ -1819,7 +1830,6 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { defer tracy.end(); const zcu = pt.zcu; - const gpa = zcu.gpa; const file = zcu.fileByIndex(file_index); assert(file.getMode() == .zig); assert(zcu.fileRootType(file_index) == .none); @@ -1834,36 +1844,6 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { }); const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false); errdefer zcu.intern_pool.remove(pt.tid, struct_ty); - - switch (zcu.comp.cache_use) { - .whole => |whole| if (whole.cache_manifest) |man| { - const source = file.getSource(gpa) catch |err| { - try pt.reportRetryableFileError(file_index, "unable to load source: {s}", .{@errorName(err)}); - return error.AnalysisFail; - }; - - const resolved_path = std.fs.path.resolve(gpa, &.{ - file.mod.root.root_dir.path orelse ".", - file.mod.root.sub_path, - file.sub_file_path, - }) catch |err| { - try pt.reportRetryableFileError(file_index, "unable to resolve path: {s}", .{@errorName(err)}); - 
return error.AnalysisFail; - }; - errdefer gpa.free(resolved_path); - - whole.cache_manifest_mutex.lock(); - defer whole.cache_manifest_mutex.unlock(); - man.addFilePostContents(resolved_path, source.bytes, source.stat) catch |err| switch (err) { - error.OutOfMemory => |e| return e, - else => { - try pt.reportRetryableFileError(file_index, "unable to update cache: {s}", .{@errorName(err)}); - return error.AnalysisFail; - }, - }; - }, - .incremental => {}, - } } pub fn importPkg(pt: Zcu.PerThread, mod: *Module) Allocator.Error!Zcu.ImportFileResult { @@ -2800,8 +2780,16 @@ pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Err /// Removes any entry from `Zcu.failed_files` associated with `file`. Acquires `Compilation.mutex` as needed. /// `file.zir` must be unchanged from the last update, as it is used to determine if there is such an entry. fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void { - const zir = file.zir orelse return; - if (!zir.hasCompileErrors()) return; + switch (file.getMode()) { + .zig => { + const zir = file.zir orelse return; + if (!zir.hasCompileErrors()) return; + }, + .zon => { + const zoir = file.zoir orelse return; + if (!zoir.hasCompileErrors()) return; + }, + } pt.zcu.comp.mutex.lock(); defer pt.zcu.comp.mutex.unlock(); From 3ca588bcc6d6640b7faa41a271580dd384963927 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 4 Feb 2025 14:42:09 +0000 Subject: [PATCH 6/9] compiler: integrate importing ZON with incremental compilation The changes from a few commits earlier, where semantic analysis no longer occurs if any Zig files failed to lower to ZIR, mean `file` dependencies are no longer necessary! However, we now need them for ZON files, to be invalidated whenever a ZON file changes. 
--- src/Compilation.zig | 16 ++++++++++++---- src/InternPool.zig | 20 ++++++++------------ src/Sema.zig | 3 +-- src/Zcu.zig | 16 ++++++++++++---- src/Zcu/PerThread.zig | 15 +++++++++++++++ 5 files changed, 48 insertions(+), 22 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 93ce5ba6bee5..14c216854e88 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2903,10 +2903,12 @@ pub fn makeBinFileWritable(comp: *Compilation) !void { const Header = extern struct { intern_pool: extern struct { thread_count: u32, - file_deps_len: u32, src_hash_deps_len: u32, nav_val_deps_len: u32, nav_ty_deps_len: u32, + interned_deps_len: u32, + zon_file_deps_len: u32, + embed_file_deps_len: u32, namespace_deps_len: u32, namespace_name_deps_len: u32, first_dependency_len: u32, @@ -2947,10 +2949,12 @@ pub fn saveState(comp: *Compilation) !void { const header: Header = .{ .intern_pool = .{ .thread_count = @intCast(ip.locals.len), - .file_deps_len = @intCast(ip.file_deps.count()), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), .nav_val_deps_len = @intCast(ip.nav_val_deps.count()), .nav_ty_deps_len = @intCast(ip.nav_ty_deps.count()), + .interned_deps_len = @intCast(ip.interned_deps.count()), + .zon_file_deps_len = @intCast(ip.zon_file_deps.count()), + .embed_file_deps_len = @intCast(ip.embed_file_deps.count()), .namespace_deps_len = @intCast(ip.namespace_deps.count()), .namespace_name_deps_len = @intCast(ip.namespace_name_deps.count()), .first_dependency_len = @intCast(ip.first_dependency.count()), @@ -2975,14 +2979,18 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs, mem.asBytes(&header)); addBuf(&bufs, mem.sliceAsBytes(pt_headers.items)); - addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.keys())); - addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys())); addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.keys())); addBuf(&bufs, 
mem.sliceAsBytes(ip.nav_val_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.keys())); addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.keys())); addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.keys())); diff --git a/src/InternPool.zig b/src/InternPool.zig index 4a6b5a86d2a7..60d24223ce2d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -17,13 +17,6 @@ tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32), /// Cached shift amount to put a `tid` in the top bits of a 32-bit value. tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32), -/// Dependencies on whether an entire file gets past AstGen. -/// These are triggered by `@import`, so that: -/// * if a file initially fails AstGen, triggering a transitive failure, when a future update -/// causes it to succeed AstGen, the `@import` is re-analyzed, allowing analysis to proceed -/// * if a file initially succeds AstGen, but a future update causes the file to fail it, -/// the `@import` is re-analyzed, registering a transitive failure -file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index), /// Dependencies on the source code hash associated with a ZIR instruction. /// * For a `declaration`, this is the entire declaration body. /// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations). 
@@ -42,6 +35,9 @@ nav_ty_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index), /// * a container type requiring resolution (invalidated when the type must be recreated at a new index) /// Value is index into `dep_entries` of the first dependency on this interned value. interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index), +/// Dependencies on a ZON file. Triggered by `@import` of ZON. +/// Value is index into `dep_entries` of the first dependency on this ZON file. +zon_file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index), /// Dependencies on an embedded file. /// Introduced by `@embedFile`; invalidated when the file changes. /// Value is index into `dep_entries` of the first dependency on this `Zcu.EmbedFile`. @@ -89,11 +85,11 @@ pub const empty: InternPool = .{ .tid_shift_30 = if (single_threaded) 0 else 31, .tid_shift_31 = if (single_threaded) 0 else 31, .tid_shift_32 = if (single_threaded) 0 else 31, - .file_deps = .empty, .src_hash_deps = .empty, .nav_val_deps = .empty, .nav_ty_deps = .empty, .interned_deps = .empty, + .zon_file_deps = .empty, .embed_file_deps = .empty, .namespace_deps = .empty, .namespace_name_deps = .empty, @@ -824,11 +820,11 @@ pub const Nav = struct { }; pub const Dependee = union(enum) { - file: FileIndex, src_hash: TrackedInst.Index, nav_val: Nav.Index, nav_ty: Nav.Index, interned: Index, + zon_file: FileIndex, embed_file: Zcu.EmbedFile.Index, namespace: TrackedInst.Index, namespace_name: NamespaceNameKey, @@ -876,11 +872,11 @@ pub const DependencyIterator = struct { pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator { const first_entry = switch (dependee) { - .file => |x| ip.file_deps.get(x), .src_hash => |x| ip.src_hash_deps.get(x), .nav_val => |x| ip.nav_val_deps.get(x), .nav_ty => |x| ip.nav_ty_deps.get(x), .interned => |x| ip.interned_deps.get(x), + .zon_file => |x| ip.zon_file_deps.get(x), .embed_file => |x| ip.embed_file_deps.get(x), .namespace => |x| 
ip.namespace_deps.get(x), .namespace_name => |x| ip.namespace_name_deps.get(x), @@ -947,11 +943,11 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend }, inline else => |dependee_payload, tag| new_index: { const gop = try switch (tag) { - .file => ip.file_deps, .src_hash => ip.src_hash_deps, .nav_val => ip.nav_val_deps, .nav_ty => ip.nav_ty_deps, .interned => ip.interned_deps, + .zon_file => ip.zon_file_deps, .embed_file => ip.embed_file_deps, .namespace => ip.namespace_deps, .namespace_name => ip.namespace_name_deps, @@ -6688,11 +6684,11 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { pub fn deinit(ip: *InternPool, gpa: Allocator) void { if (debug_state.enable_checks) std.debug.assert(debug_state.intern_pool == null); - ip.file_deps.deinit(gpa); ip.src_hash_deps.deinit(gpa); ip.nav_val_deps.deinit(gpa); ip.nav_ty_deps.deinit(gpa); ip.interned_deps.deinit(gpa); + ip.zon_file_deps.deinit(gpa); ip.embed_file_deps.deinit(gpa); ip.namespace_deps.deinit(gpa); ip.namespace_name_deps.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index 068d28209a6a..ad144bf0133a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6143,7 +6143,6 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr pt.updateFile(result.file, path_digest) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); - try sema.declareDependency(.{ .file = result.file_index }); try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); try sema.declareDependency(.{ .interned = ty }); @@ -13986,7 +13985,6 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
}; switch (result.file.getMode()) { .zig => { - try sema.declareDependency(.{ .file = result.file_index }); try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); try sema.declareDependency(.{ .interned = ty }); @@ -14003,6 +14001,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.fail(block, operand_src, "'@import' of ZON must have a known result type", .{}); } + try sema.declareDependency(.{ .zon_file = result.file_index }); const interned = try LowerZon.run( sema, result.file, diff --git a/src/Zcu.zig b/src/Zcu.zig index d94cfc10d6a1..9b292ad78354 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -705,6 +705,14 @@ pub const File = struct { /// field is populated with that old ZIR. prev_zir: ?*Zir = null, + /// This field serves a similar purpose to `prev_zir`, but for ZOIR. However, since we do not + /// need to map old ZOIR to new ZOIR -- instead only invalidating dependencies if the ZOIR + /// changed -- this field is just a simple boolean. + /// + /// When `zoir` is updated, this field is set to `true`. In `updateZirRefs`, if this is `true`, + /// we invalidate the corresponding `zon_file` dependency, and reset it to `false`. + zoir_invalidated: bool = false, + /// A single reference to a file. pub const Reference = union(enum) { /// The file is imported directly (i.e. not as a package) with @import. 
@@ -4074,10 +4082,6 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com const zcu = data.zcu; const ip = &zcu.intern_pool; switch (data.dependee) { - .file => |file| { - const file_path = zcu.fileByIndex(file).sub_file_path; - return writer.print("file('{s}')", .{file_path}); - }, .src_hash => |ti| { const info = ti.resolveFull(ip) orelse { return writer.writeAll("inst()"); @@ -4098,6 +4102,10 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com .func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}), else => unreachable, }, + .zon_file => |file| { + const file_path = zcu.fileByIndex(file).sub_file_path; + return writer.print("zon_file('{s}')", .{file_path}); + }, .embed_file => |ef_idx| { const ef = ef_idx.get(zcu); return writer.print("embed_file('{s}')", .{std.fs.path.fmtJoin(&.{ diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 18ec135ab2bb..a28a2ae7ec2b 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -145,6 +145,9 @@ pub fn updateFile( file.zir = null; } + // If ZOIR is changing, then we need to invalidate dependencies on it + if (file.zoir != null) file.zoir_invalidated = true; + // We're going to re-load everything, so unload source, AST, ZIR, ZOIR. file.unload(gpa); @@ -380,11 +383,23 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { const gpa = zcu.gpa; // We need to visit every updated File for every TrackedInst in InternPool. + // This only includes Zig files; ZON files are omitted. 
var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .empty; defer cleanupUpdatedFiles(gpa, &updated_files); + for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); assert(file.status == .success); + switch (file.getMode()) { + .zig => {}, // logic below + .zon => { + if (file.zoir_invalidated) { + try zcu.markDependeeOutdated(.not_marked_po, .{ .zon_file = file_index }); + file.zoir_invalidated = false; + } + continue; + }, + } const old_zir = file.prev_zir orelse continue; const new_zir = file.zir.?; const gop = try updated_files.getOrPut(gpa, file_index); From fb481d0bf81df7b0ce9afde5cd615133d9895726 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 4 Feb 2025 15:05:40 +0000 Subject: [PATCH 7/9] Zcu: fix bug clearing compile errors And add an assertion in safe builds that our initial check is actually correct. --- src/Zcu/PerThread.zig | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index a28a2ae7ec2b..32372bf597d5 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2795,21 +2795,32 @@ pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Err /// Removes any entry from `Zcu.failed_files` associated with `file`. Acquires `Compilation.mutex` as needed. /// `file.zir` must be unchanged from the last update, as it is used to determine if there is such an entry. 
fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void { - switch (file.getMode()) { - .zig => { - const zir = file.zir orelse return; - if (!zir.hasCompileErrors()) return; - }, - .zon => { - const zoir = file.zoir orelse return; - if (!zoir.hasCompileErrors()) return; + const maybe_has_error = switch (file.status) { + .never_loaded => false, + .retryable_failure => true, + .astgen_failure => true, + .success => switch (file.getMode()) { + .zig => has_error: { + const zir = file.zir orelse break :has_error false; + break :has_error zir.hasCompileErrors(); + }, + .zon => has_error: { + const zoir = file.zoir orelse break :has_error false; + break :has_error zoir.hasCompileErrors(); + }, }, + }; + + // If runtime safety is on, let's quickly lock the mutex and check anyway. + if (!maybe_has_error and !std.debug.runtime_safety) { + return; } pt.zcu.comp.mutex.lock(); defer pt.zcu.comp.mutex.unlock(); if (pt.zcu.failed_files.fetchSwapRemove(file)) |kv| { - if (kv.value) |msg| msg.destroy(pt.zcu.gpa); // Delete previous error message. 
+ assert(maybe_has_error); // the runtime safety case above + if (kv.value) |msg| msg.destroy(pt.zcu.gpa); // delete previous error message } } From d60910c9d0fa4d28a4f738e95357ddada84ae106 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 4 Feb 2025 15:13:06 +0000 Subject: [PATCH 8/9] incremental: add ZON test --- test/incremental/change_zon_file | 46 ++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 test/incremental/change_zon_file diff --git a/test/incremental/change_zon_file b/test/incremental/change_zon_file new file mode 100644 index 000000000000..da55c8ad8352 --- /dev/null +++ b/test/incremental/change_zon_file @@ -0,0 +1,46 @@ +#target=x86_64-linux-selfhosted +#target=x86_64-linux-cbe +#target=x86_64-windows-cbe +//#target=wasm32-wasi-selfhosted +#update=initial version +#file=main.zig +const std = @import("std"); +const message: []const u8 = @import("message.zon"); +pub fn main() !void { + try std.io.getStdOut().writeAll(message); +} +#file=message.zon +"Hello, World!\n" +#expect_stdout="Hello, World!\n" + +#update=change ZON file contents +#file=message.zon +"Hello again, World!\n" +#expect_stdout="Hello again, World!\n" + +#update=delete file +#rm_file=message.zon +#expect_error=message.zon:1:1: error: unable to load './message.zon': FileNotFound + +#update=remove reference to ZON file +#file=main.zig +const std = @import("std"); +const message: []const u8 = @import("message.zon"); +pub fn main() !void { + try std.io.getStdOut().writeAll("a hardcoded string\n"); +} +#expect_error=message.zon:1:1: error: unable to load './message.zon': FileNotFound + +#update=recreate ZON file +#file=message.zon +"We're back, World!\n" +#expect_stdout="a hardcoded string\n" + +#update=re-introduce reference to ZON file +#file=main.zig +const std = @import("std"); +const message: []const u8 = @import("message.zon"); +pub fn main() !void { + try std.io.getStdOut().writeAll(message); +} +#expect_stdout="We're back, World!\n" From 
bebfa036ba52076cd03f9ef943f61da64ba6e97b Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 4 Feb 2025 18:30:50 +0000 Subject: [PATCH 9/9] test: remove failing case Unfortunately, now that this error is more in line with other `@import` errors, it isn't so easy to have a test case for. --- test/cases/compile_errors/@import_zon_bad_import.zig | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 test/cases/compile_errors/@import_zon_bad_import.zig diff --git a/test/cases/compile_errors/@import_zon_bad_import.zig b/test/cases/compile_errors/@import_zon_bad_import.zig deleted file mode 100644 index 4df03ce2dbfb..000000000000 --- a/test/cases/compile_errors/@import_zon_bad_import.zig +++ /dev/null @@ -1,9 +0,0 @@ -export fn entry() void { - _ = @import( - "bogus-does-not-exist.zon", - ); -} - -// error -// -// :3:9: error: unable to open 'bogus-does-not-exist.zon': FileNotFound