From e01e45d7a5b654a4f035045a8f81d58d20f194e2 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 26 Nov 2024 17:28:40 +0100 Subject: [PATCH 01/31] set minimum runtime version to `0.14.0-dev.2046+b8795b4d0` --- .github/workflows/build_runner.yml | 6 +----- build.zig | 6 ++++-- src/build_runner/BuildRunnerVersion.zig | 24 ++++++++++----------- src/build_runner/{0.12.0.zig => master.zig} | 10 ++++----- 4 files changed, 21 insertions(+), 25 deletions(-) rename src/build_runner/{0.12.0.zig => master.zig} (99%) diff --git a/.github/workflows/build_runner.yml b/.github/workflows/build_runner.yml index fb1d93494..e6c7db434 100644 --- a/.github/workflows/build_runner.yml +++ b/.github/workflows/build_runner.yml @@ -20,11 +20,7 @@ jobs: matrix: include: - zig-version: master - build-runner-file: 0.12.0.zig - - zig-version: 0.13.0 - build-runner-file: 0.12.0.zig # The Zig 0.12.0 build runner is also compatible with Zig 0.13.0 - - zig-version: 0.12.0 - build-runner-file: 0.12.0.zig + build-runner-file: master.zig runs-on: ubuntu-latest steps: diff --git a/build.zig b/build.zig index 004ba8265..aab9f25ad 100644 --- a/build.zig +++ b/build.zig @@ -18,14 +18,14 @@ const zls_version = std.SemanticVersion{ .major = 0, .minor = 14, .patch = 0, .p const minimum_build_zig_version = "0.14.0-dev.2472+cc82620b2"; /// Specify the minimum Zig version that is required to run ZLS: -/// Release 0.12.0 +/// Build Runner: Implement File System Watching for kqueue /// /// Examples of reasons that would cause the minimum runtime version to be bumped are: /// - breaking change to the Zig Syntax /// - breaking change to AstGen (i.e `zig ast-check`) /// /// A breaking change to the Zig Build System should be handled by updating ZLS's build runner (see src\build_runner) -const minimum_runtime_zig_version = "0.12.0"; +const minimum_runtime_zig_version = "0.14.0-dev.2046+b8795b4d0"; const release_targets = [_]std.Target.Query{ .{ .cpu_arch = .x86_64, .os_tag = .windows }, @@ -485,6 +485,8 @@ fn 
release(b: *Build, target_queries: []const std.Target.Query, release_artifact } const Build = blk: { + @setEvalBranchQuota(10_000); + const min_build_zig = std.SemanticVersion.parse(minimum_build_zig_version) catch unreachable; const min_runtime_zig = std.SemanticVersion.parse(minimum_runtime_zig_version) catch unreachable; diff --git a/src/build_runner/BuildRunnerVersion.zig b/src/build_runner/BuildRunnerVersion.zig index bddacfbc8..aaecb3a52 100644 --- a/src/build_runner/BuildRunnerVersion.zig +++ b/src/build_runner/BuildRunnerVersion.zig @@ -8,9 +8,7 @@ const build_options = @import("build_options"); /// /// The GitHub matrix in `.github\workflows\build_runner.yml` should be updated to check Zig master with the latest build runner file. pub const BuildRunnerVersion = enum { - // master, - @"0.13.0", - @"0.12.0", + master, pub fn isTaggedRelease(version: BuildRunnerVersion) bool { return !@hasField(BuildRunnerVersion, "master") or version != .master; @@ -23,9 +21,7 @@ pub const BuildRunnerVersion = enum { pub fn getBuildRunnerFile(version: BuildRunnerVersion) [:0]const u8 { return switch (version) { - // .master => @embedFile("master.zig"), - .@"0.13.0" => @embedFile("0.12.0.zig"), // The Zig 0.12.0 build runner is also compatible with Zig 0.13.0 - .@"0.12.0" => @embedFile("0.12.0.zig"), + inline else => |tag| @embedFile(@tagName(tag) ++ ".zig"), }; } }; @@ -106,13 +102,17 @@ test selectVersionInternal { const build_runner = BuildRunnerVersion.selectBuildRunnerVersion(build_options.version); try expect(build_runner != null); try expect(build_runner.?.isTaggedRelease()); - } else { - // A development build of ZLS should support the latest tagged release of Zig - // Example: ZLS 0.13.0-dev.1+aaaaaaaaa should support Zig 0.12.0 - const build_runner = BuildRunnerVersion.selectBuildRunnerVersion(.{ .major = build_options.version.major, .minor = build_options.version.minor - 1, .patch = 0 }); - try expect(build_runner != null); - try 
expect(build_runner.?.isTaggedRelease()); } + + // nah, not supported + // + // if (!is_zls_version_tagged_release) { + // // A development build of ZLS should support the latest tagged release of Zig + // // Example: ZLS 0.13.0-dev.1+aaaaaaaaa should support Zig 0.12.0 + // const build_runner = BuildRunnerVersion.selectBuildRunnerVersion(.{ .major = build_options.version.major, .minor = build_options.version.minor - 1, .patch = 0 }); + // try expect(build_runner != null); + // try expect(build_runner.?.isTaggedRelease()); + // } } { diff --git a/src/build_runner/0.12.0.zig b/src/build_runner/master.zig similarity index 99% rename from src/build_runner/0.12.0.zig rename to src/build_runner/master.zig index 688ebadb0..36fab781a 100644 --- a/src/build_runner/0.12.0.zig +++ b/src/build_runner/master.zig @@ -1,19 +1,17 @@ //! PLEASE READ THE FOLLOWING MESSAGE BEFORE EDITING THIS FILE: //! //! This build runner is targeting compatibility with the following Zig versions: -//! - Zig 0.12.0 -//! - Zig 0.13.0 -//! - master +//! - master (0.14.0-dev.2046+b8795b4d0 and later) //! //! Handling multiple Zig versions can be achieved by branching on the `builtin.zig_version` at comptime. //! As an example, see how `writeFile2_removed_version` or `std_progress_rework_version` are used to deal with breaking changes. //! //! You can test out the build runner on ZLS's `build.zig` with the following command: -//! `zig build --build-runner src/build_runner/0.12.0.zig` +//! `zig build --build-runner src/build_runner/master.zig` //! //! You can also test the build runner on any other `build.zig` with the following command: -//! `zig build --build-file /path/to/build.zig --build-runner /path/to/zls/src/build_runner/0.12.0.zig` -//! `zig build --build-runner /path/to/zls/src/build_runner/0.12.0.zig` (if the cwd contains build.zig) +//! `zig build --build-file /path/to/build.zig --build-runner /path/to/zls/src/build_runner/master.zig` +//! 
`zig build --build-runner /path/to/zls/src/build_runner/master.zig` (if the cwd contains build.zig) //! const root = @import("@build"); From 8f3d67eb6d538dd6c45bbb263ab584ef1122c52f Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 13 Nov 2024 19:04:30 +0100 Subject: [PATCH 02/31] remove old Zig version compatibility from build runner --- src/build_runner/master.zig | 162 ++++++++++-------------------------- 1 file changed, 46 insertions(+), 116 deletions(-) diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index 36fab781a..13120faf3 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -4,7 +4,7 @@ //! - master (0.14.0-dev.2046+b8795b4d0 and later) //! //! Handling multiple Zig versions can be achieved by branching on the `builtin.zig_version` at comptime. -//! As an example, see how `writeFile2_removed_version` or `std_progress_rework_version` are used to deal with breaking changes. +//! As an example, see how `child_type_coercion_version` is used to deal with breaking changes. //! //! You can test out the build runner on ZLS's `build.zig` with the following command: //! 
`zig build --build-runner src/build_runner/master.zig` @@ -29,16 +29,6 @@ pub const dependencies = @import("@dependencies"); // ----------- List of Zig versions that introduced breaking changes ----------- -const writeFile2_removed_version = - std.SemanticVersion.parse("0.13.0-dev.68+b86c4bde6") catch unreachable; -const std_progress_rework_version = - std.SemanticVersion.parse("0.13.0-dev.336+963ffe9d5") catch unreachable; -const file_watch_version = - std.SemanticVersion.parse("0.14.0-dev.283+1d20ff11d") catch unreachable; -const live_rebuild_processes = - std.SemanticVersion.parse("0.14.0-dev.310+9d38e82b5") catch unreachable; -const file_watch_windows_version = - std.SemanticVersion.parse("0.14.0-dev.625+2de0e2eca") catch unreachable; const child_type_coercion_version = std.SemanticVersion.parse("0.14.0-dev.2506+32354d119") catch unreachable; const accept_root_module_version = @@ -46,11 +36,6 @@ const accept_root_module_version = // ----------------------------------------------------------------------------- -const ProgressNode = if (builtin.zig_version.order(std_progress_rework_version) == .lt) - *std.Progress.Node -else - std.Progress.Node; - ///! This is a modified build runner to extract information out of build.zig ///! 
Modified version of lib/build_runner.zig pub fn main() !void { @@ -71,20 +56,16 @@ pub fn main() !void { var arg_idx: usize = 1; const zig_exe = nextArg(args, &arg_idx) orelse fatal("missing zig compiler path", .{}); - const zig_lib_directory = if (comptime builtin.zig_version.order(file_watch_version).compare(.gte)) blk: { - const zig_lib_dir = nextArg(args, &arg_idx) orelse fatal("missing zig lib directory path", .{}); - - const zig_lib_directory: std.Build.Cache.Directory = .{ - .path = zig_lib_dir, - .handle = try std.fs.cwd().openDir(zig_lib_dir, .{}), - }; - - break :blk zig_lib_directory; - } else {}; + const zig_lib_dir = nextArg(args, &arg_idx) orelse fatal("missing zig lib directory path", .{}); const build_root = nextArg(args, &arg_idx) orelse fatal("missing build root directory path", .{}); const cache_root = nextArg(args, &arg_idx) orelse fatal("missing cache root directory path", .{}); const global_cache_root = nextArg(args, &arg_idx) orelse fatal("missing global cache root directory path", .{}); + const zig_lib_directory: std.Build.Cache.Directory = .{ + .path = zig_lib_dir, + .handle = try std.fs.cwd().openDir(zig_lib_dir, .{}), + }; + const build_root_directory: std.Build.Cache.Directory = .{ .path = build_root, .handle = try std.fs.cwd().openDir(build_root, .{}), @@ -100,7 +81,7 @@ pub fn main() !void { .handle = try std.fs.cwd().makeOpenPath(global_cache_root, .{}), }; - var graph: std.Build.Graph = if (comptime builtin.zig_version.order(file_watch_version).compare(.gte)) .{ + var graph: std.Build.Graph = .{ .arena = arena, .cache = .{ .gpa = arena, @@ -114,19 +95,6 @@ pub fn main() !void { .query = .{}, .result = try std.zig.system.resolveTargetQuery(.{}), }, - } else .{ - .arena = arena, - .cache = .{ - .gpa = arena, - .manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}), - }, - .zig_exe = zig_exe, - .env_map = try process.getEnvMap(arena), - .global_cache_root = global_cache_directory, - .host = .{ - .query = .{}, - .result 
= try std.zig.system.resolveTargetQuery(.{}), - }, }; graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); @@ -226,8 +194,6 @@ pub fn main() !void { const next_arg = nextArg(args, &arg_idx) orelse fatal("expected [all|new|failures|none] after '{s}'", .{arg}); _ = next_arg; - } else if ((comptime builtin.zig_version.order(file_watch_version) == .lt) and mem.eql(u8, arg, "--zig-lib-dir")) { - builder.zig_lib_dir = .{ .cwd_relative = nextArgOrFatal(args, &arg_idx) }; } else if (mem.eql(u8, arg, "--seed")) { const next_arg = nextArg(args, &arg_idx) orelse fatal("expected u32 after '{s}'", .{arg}); @@ -236,7 +202,7 @@ pub fn main() !void { next_arg, @errorName(err), }); }; - } else if ((builtin.zig_version.order(file_watch_version) != .lt) and mem.eql(u8, arg, "--debounce")) { + } else if (mem.eql(u8, arg, "--debounce")) { const next_arg = nextArg(args, &arg_idx) orelse fatal("expected u16 after '{s}'", .{arg}); debounce_interval_ms = std.fmt.parseUnsigned(u16, next_arg, 0) catch |err| { @@ -276,7 +242,7 @@ pub fn main() !void { builder.verbose_llvm_cpu_features = true; } else if (mem.eql(u8, arg, "--prominent-compile-errors")) { // prominent_compile_errors = true; - } else if ((builtin.zig_version.order(file_watch_version) != .lt) and mem.eql(u8, arg, "--watch")) { + } else if (mem.eql(u8, arg, "--watch")) { // watch mode will always be enabled if supported // watch = true; } else if (mem.eql(u8, arg, "-fwine")) { @@ -333,16 +299,9 @@ pub fn main() !void { } } - var progress = if (comptime builtin.zig_version.order(std_progress_rework_version) == .lt) - std.Progress{ .terminal = null } - else {}; - - const main_progress_node = if (comptime builtin.zig_version.order(std_progress_rework_version) == .lt) - progress.start("", 0) - else - std.Progress.start(.{ - .disable_printing = true, - }); + const main_progress_node = std.Progress.start(.{ + .disable_printing = true, + }); defer main_progress_node.end(); builder.debug_log_scopes = debug_log_scopes.items; @@ 
-362,12 +321,7 @@ pub fn main() !void { const s = std.fs.path.sep_str; const tmp_sub_path = "tmp" ++ s ++ (output_tmp_nonce orelse fatal("missing -Z arg", .{})); - const writeFileFn = if (comptime builtin.zig_version.order(writeFile2_removed_version) == .lt) - std.fs.Dir.writeFile2 - else - std.fs.Dir.writeFile; - - writeFileFn(local_cache_directory.handle, .{ + std.fs.Dir.writeFile(local_cache_directory.handle, .{ .sub_path = tmp_sub_path, .data = buffer.items, .flags = .{ .exclusive = true }, @@ -415,19 +369,7 @@ pub fn main() !void { seed, ); - const watch_suported = switch (builtin.os.tag) { - .linux => blk: { - if (comptime builtin.zig_version.order(file_watch_version) == .lt) break :blk false; - - // std.build.Watch requires `FAN_REPORT_TARGET_FID` which is Linux 5.17+ - const utsname = std.posix.uname(); - const version = std.SemanticVersion.parse(&utsname.release) catch break :blk true; - break :blk version.order(.{ .major = 5, .minor = 17, .patch = 0 }) != .lt; - }, - .windows => comptime builtin.zig_version.order(file_watch_windows_version) != .lt, - else => false, - }; - if (!watch_suported) return; + if (!Watch.have_impl) return; var w = try Watch.init(); var step_stack = try stepNamesToStepStack(gpa, builder, targets.items); @@ -568,37 +510,30 @@ fn prepare( } fn runSteps( - gpa: std.mem.Allocator, b: *std.Build, steps: []const *Step, - parent_prog_node: ProgressNode, + parent_prog_node: std.Progress.Node, run: *Run, ) error{ OutOfMemory, UncleanExit }!void { const thread_pool = &run.thread_pool; - { - var step_prog = parent_prog_node.start("steps", steps.len); - defer step_prog.end(); + var step_prog = parent_prog_node.start("steps", steps.len); + defer step_prog.end(); - var wait_group: std.Thread.WaitGroup = .{}; - defer wait_group.wait(); + var wait_group: std.Thread.WaitGroup = .{}; + defer wait_group.wait(); - // Here we spawn the initial set of tasks with a nice heuristic - - // dependency order. 
Each worker when it finishes a step will then - // check whether it should run any dependants. - for (steps) |step| { - if (step.state == .skipped_oom) continue; + // Here we spawn the initial set of tasks with a nice heuristic - + // dependency order. Each worker when it finishes a step will then + // check whether it should run any dependants. + for (steps) |step| { + if (step.state == .skipped_oom) continue; - wait_group.start(); - thread_pool.spawn(workerMakeOneStep, .{ - &wait_group, b, step, if (comptime builtin.zig_version.order(std_progress_rework_version) == .lt) &step_prog else step_prog, run, - }) catch @panic("OOM"); - } + wait_group.start(); + thread_pool.spawn(workerMakeOneStep, .{ + &wait_group, b, step, step_prog, run, + }) catch @panic("OOM"); } - assert(run.memory_blocked_steps.items.len == 0); - - _ = gpa; - // TODO collect std.zig.ErrorBundle's and stderr from failed steps and send them to ZLS } /// Traverse the dependency graph depth-first and make it undirected by having @@ -655,7 +590,7 @@ fn workerMakeOneStep( wg: *std.Thread.WaitGroup, b: *std.Build, s: *Step, - prog_node: ProgressNode, + prog_node: std.Progress.Node, run: *Run, ) void { defer wg.finish(); @@ -709,21 +644,21 @@ fn workerMakeOneStep( } var sub_prog_node = prog_node.start(s.name, 0); - if (comptime builtin.zig_version.order(std_progress_rework_version) == .lt) sub_prog_node.activate(); defer sub_prog_node.end(); - const make_result = s.make( - if (comptime builtin.zig_version.order(std_progress_rework_version) == .lt) - &sub_prog_node - else if (comptime builtin.zig_version.order(live_rebuild_processes) == .lt) - sub_prog_node - else - .{ - .progress_node = sub_prog_node, - .thread_pool = thread_pool, - .watch = false, - }, - ); + const make_result = s.make(.{ + .progress_node = sub_prog_node, + .thread_pool = thread_pool, + .watch = true, + }); + + const show_error_msgs = s.result_error_msgs.items.len > 0; + const show_compile_errors = 
s.result_error_bundle.errorMessageCount() > 0; + + if (show_error_msgs or show_compile_errors) { + // s.result_stderr + // TODO send to ZLS + } handle_result: { if (make_result) |_| { @@ -804,9 +739,7 @@ fn argsRest(args: ArgsType, idx: usize) ?ArgsType { /// --debug-build-runner-leaks which would make this function return instead of /// calling exit. fn cleanExit() void { - if (comptime builtin.zig_version.order(std_progress_rework_version) != .lt) { - std.debug.lockStdErr(); - } + std.debug.lockStdErr(); process.exit(0); } @@ -814,9 +747,7 @@ fn cleanExit() void { /// --debug-build-runner-leaks which would make this function return instead of /// calling exit. fn uncleanExit() error{UncleanExit} { - if (comptime builtin.zig_version.order(std_progress_rework_version) != .lt) { - std.debug.lockStdErr(); - } + std.debug.lockStdErr(); process.exit(1); } @@ -897,7 +828,7 @@ fn extractBuildInformation( gpa: Allocator, b: *std.Build, arena: Allocator, - main_progress_node: ProgressNode, + main_progress_node: std.Progress.Node, run: *Run, seed: u32, ) !void { @@ -1048,7 +979,6 @@ fn extractBuildInformation( // run all steps that are dependencies try runSteps( - gpa, b, step_dependencies.keys(), main_progress_node, From 3979eb0198a2d998e79ac4962b0243963ff66a3e Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 26 Nov 2024 12:25:10 +0100 Subject: [PATCH 03/31] change BuildConfig.zig to shared.zig --- src/DocumentStore.zig | 2 +- src/Server.zig | 6 +++--- src/build_runner/BuildConfig.zig | 14 -------------- src/build_runner/master.zig | 3 ++- src/build_runner/shared.zig | 16 ++++++++++++++++ 5 files changed, 22 insertions(+), 19 deletions(-) delete mode 100644 src/build_runner/BuildConfig.zig create mode 100644 src/build_runner/shared.zig diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 3814659ee..f55d917d6 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -8,7 +8,7 @@ const offsets = @import("offsets.zig"); const log = 
std.log.scoped(.zls_store); const Ast = std.zig.Ast; const BuildAssociatedConfig = @import("BuildAssociatedConfig.zig"); -const BuildConfig = @import("build_runner/BuildConfig.zig"); +const BuildConfig = @import("build_runner/shared.zig").BuildConfig; const tracy = @import("tracy"); const translate_c = @import("translate_c.zig"); const AstGen = std.zig.AstGen; diff --git a/src/Server.zig b/src/Server.zig index 8b137c3f6..ee682b5ed 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -1243,7 +1243,7 @@ fn resolveConfiguration( break :blk; }; const build_runner_source = build_runner_version.getBuildRunnerFile(); - const build_runner_config_source = @embedFile("build_runner/BuildConfig.zig"); + const build_runner_config_source = @embedFile("build_runner/shared.zig"); const build_runner_hash = get_hash: { const Hasher = std.crypto.auth.siphash.SipHash128(1, 3); @@ -1265,11 +1265,11 @@ fn resolveConfiguration( defer cache_dir.close(); cache_dir.writeFile(.{ - .sub_path = "BuildConfig.zig", + .sub_path = "shared.zig", .data = build_runner_config_source, .flags = .{ .exclusive = true }, }) catch |err| if (err != error.PathAlreadyExists) { - log.err("failed to write file '{s}/BuildConfig.zig': {}", .{ cache_path, err }); + log.err("failed to write file '{s}/shared.zig': {}", .{ cache_path, err }); break :blk; }; diff --git a/src/build_runner/BuildConfig.zig b/src/build_runner/BuildConfig.zig deleted file mode 100644 index f09b76656..000000000 --- a/src/build_runner/BuildConfig.zig +++ /dev/null @@ -1,14 +0,0 @@ -const std = @import("std"); - -deps_build_roots: []DepsBuildRoots, -packages: []Package, -include_dirs: []const []const u8, -top_level_steps: []const []const u8, -available_options: std.json.ArrayHashMap(AvailableOption), - -pub const DepsBuildRoots = Package; -pub const Package = struct { - name: []const u8, - path: []const u8, -}; -pub const AvailableOption = std.meta.FieldType(std.meta.FieldType(std.Build, .available_options_map).KV, .value); diff --git 
a/src/build_runner/master.zig b/src/build_runner/master.zig index 13120faf3..4bf0bc238 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -781,7 +781,8 @@ fn validateSystemLibraryOptions(b: *std.Build) void { // // -const BuildConfig = @import("BuildConfig.zig"); +const shared = @import("shared.zig"); +const BuildConfig = shared.BuildConfig; const Packages = struct { allocator: std.mem.Allocator, diff --git a/src/build_runner/shared.zig b/src/build_runner/shared.zig new file mode 100644 index 000000000..95e318e61 --- /dev/null +++ b/src/build_runner/shared.zig @@ -0,0 +1,16 @@ +const std = @import("std"); + +pub const BuildConfig = struct { + deps_build_roots: []DepsBuildRoots, + packages: []Package, + include_dirs: []const []const u8, + top_level_steps: []const []const u8, + available_options: std.json.ArrayHashMap(AvailableOption), + + pub const DepsBuildRoots = Package; + pub const Package = struct { + name: []const u8, + path: []const u8, + }; + pub const AvailableOption = std.meta.FieldType(std.meta.FieldType(std.Build, .available_options_map).KV, .value); +}; From 47b2fd0f8cf1d87fb54982922d14a1c12e65115d Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 26 Nov 2024 16:58:12 +0100 Subject: [PATCH 04/31] move ZCS logic into build_runner/shared.zig --- src/ZigCompileServer.zig | 167 ------------------------------------ src/build_runner/shared.zig | 110 ++++++++++++++++++++++++ src/translate_c.zig | 48 +++++++---- 3 files changed, 139 insertions(+), 186 deletions(-) delete mode 100644 src/ZigCompileServer.zig diff --git a/src/ZigCompileServer.zig b/src/ZigCompileServer.zig deleted file mode 100644 index dcbc6fafb..000000000 --- a/src/ZigCompileServer.zig +++ /dev/null @@ -1,167 +0,0 @@ -//! Client side for interfacing with the Zig Compile Server (made up name) -//! [Server.zig](https://github.com/ziglang/zig/blob/master/lib/std/zig/Server.zig) specifies the Message format for "Server" to "Client". -//! 
[Client.zig](https://github.com/ziglang/zig/blob/master/lib/std/zig/Client.zig) specifies the Message format for "Client" to "Server". -//! -//! The "Server" is Zig and the "Client" is ZLS. - -in: std.fs.File, -out: std.fs.File, -pooler: std.io.Poller(StreamEnum), - -const StreamEnum = enum { in }; - -pub const Options = struct { - gpa: Allocator, - in: std.fs.File, - out: std.fs.File, -}; - -pub fn init(options: Options) Client { - const s: Client = .{ - .in = options.in, - .out = options.out, - .pooler = std.io.poll(options.gpa, StreamEnum, .{ .in = options.in }), - }; - return s; -} - -pub fn deinit(s: *Client) void { - s.pooler.deinit(); - s.* = undefined; -} - -pub fn receiveMessage(client: *Client) !InMessage.Header { - const Header = InMessage.Header; - const fifo = client.pooler.fifo(.in); - - var first_run = true; - var header: ?Header = null; - while (first_run or try client.pooler.poll()) { - first_run = false; - - if (header == null) { - if (fifo.readableLength() < @sizeOf(Header)) continue; - const buf = fifo.readableSlice(0); - const bytes_len = bswap_and_workaround_u32(buf[4..][0..4]); - const tag = bswap_and_workaround_tag(buf[0..][0..4]); - header = Header{ - .tag = tag, - .bytes_len = bytes_len, - }; - fifo.discard(@sizeOf(Header)); - } - - if (header) |h| { - if (fifo.readableLength() < h.bytes_len) continue; - return h; - } - } - return error.Timeout; -} - -pub fn receiveEmitDigest(client: *Client) !InMessage.EmitDigest { - const reader = client.pooler.fifo(.in).reader(); - return reader.readStruct(InMessage.EmitDigest); -} - -pub fn receiveErrorBundle(client: *Client) !InMessage.ErrorBundle { - const reader = client.pooler.fifo(.in).reader(); - return .{ - .extra_len = try reader.readInt(u32, .little), - .string_bytes_len = try reader.readInt(u32, .little), - }; -} - -pub fn receiveBytes(client: *Client, allocator: std.mem.Allocator, len: usize) ![]u8 { - const reader = client.pooler.fifo(.in).reader(); - const result = try allocator.alloc(u8, 
len); - errdefer allocator.free(result); - const amt = try reader.readAll(result); - if (amt != len) return error.UnexpectedEOF; - return result; -} - -pub fn receiveIntArray(client: *Client, allocator: std.mem.Allocator, len: usize) ![]u32 { - const reader = client.pooler.fifo(.in).reader(); - const bytes = try allocator.alignedAlloc(u8, @alignOf(u32), len * @sizeOf(u32)); - errdefer allocator.free(bytes); - const amt = try reader.readAll(bytes); - if (amt != bytes.len) return error.UnexpectedEOF; - const result = std.mem.bytesAsSlice(u32, bytes); - if (need_bswap) { - bswap_u32_array(result); - } - return result; -} - -pub fn serveMessage( - client: *const Client, - header: OutMessage.Header, - bufs: []const []const u8, -) !void { - var iovecs: [10]std.posix.iovec_const = undefined; - const header_le = bswap(header); - iovecs[0] = .{ - .base = @as([*]const u8, @ptrCast(&header_le)), - .len = @sizeOf(OutMessage.Header), - }; - for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| { - iovec.* = .{ - .base = buf.ptr, - .len = buf.len, - }; - } - try client.out.writevAll(iovecs[0 .. 
bufs.len + 1]); -} - -fn bswap(x: anytype) @TypeOf(x) { - if (!need_bswap) return x; - - const T = @TypeOf(x); - switch (@typeInfo(T)) { - .@"enum" => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))), - .int => return @byteSwap(x), - .@"struct" => |info| switch (info.layout) { - .@"extern" => { - var result: T = undefined; - inline for (info.fields) |field| { - @field(result, field.name) = bswap(@field(x, field.name)); - } - return result; - }, - .@"packed" => { - const I = info.backing_integer.?; - return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x))))); - }, - .auto => @compileError("auto layout struct"), - }, - else => @compileError("bswap on type " ++ @typeName(T)), - } -} - -fn bswap_u32_array(slice: []u32) void { - comptime assert(need_bswap); - for (slice) |*elem| elem.* = @byteSwap(elem.*); -} - -/// workaround for https://github.com/ziglang/zig/issues/14904 -fn bswap_and_workaround_u32(bytes_ptr: *const [4]u8) u32 { - return std.mem.readInt(u32, bytes_ptr, .little); -} - -/// workaround for https://github.com/ziglang/zig/issues/14904 -fn bswap_and_workaround_tag(bytes_ptr: *const [4]u8) InMessage.Tag { - const int = std.mem.readInt(u32, bytes_ptr, .little); - return @as(InMessage.Tag, @enumFromInt(int)); -} - -const OutMessage = std.zig.Client.Message; -const InMessage = std.zig.Server.Message; - -const Client = @This(); -const builtin = @import("builtin"); -const std = @import("std"); -const Allocator = std.mem.Allocator; -const assert = std.debug.assert; -const native_endian = builtin.target.cpu.arch.endian(); -const need_bswap = native_endian != .little; diff --git a/src/build_runner/shared.zig b/src/build_runner/shared.zig index 95e318e61..7aa69b4f5 100644 --- a/src/build_runner/shared.zig +++ b/src/build_runner/shared.zig @@ -1,4 +1,9 @@ +//! 
Contains shared code between ZLS and it's custom build runner + const std = @import("std"); +const builtin = @import("builtin"); +const native_endian = builtin.target.cpu.arch.endian(); +const need_bswap = native_endian != .little; pub const BuildConfig = struct { deps_build_roots: []DepsBuildRoots, @@ -14,3 +19,108 @@ pub const BuildConfig = struct { }; pub const AvailableOption = std.meta.FieldType(std.meta.FieldType(std.Build, .available_options_map).KV, .value); }; + +pub const Transport = struct { + in: std.fs.File, + out: std.fs.File, + poller: std.io.Poller(StreamEnum), + + const StreamEnum = enum { in }; + + pub const Header = extern struct { + tag: u32, + /// Size of the body only; does not include this Header. + bytes_len: u32, + }; + + pub const Options = struct { + gpa: std.mem.Allocator, + in: std.fs.File, + out: std.fs.File, + }; + + pub fn init(options: Options) Transport { + return .{ + .in = options.in, + .out = options.out, + .poller = std.io.poll(options.gpa, StreamEnum, .{ .in = options.in }), + }; + } + + pub fn deinit(transport: *Transport) void { + transport.poller.deinit(); + transport.* = undefined; + } + + pub fn receiveMessage(transport: *Transport, timeout_ns: ?u64) !Header { + const fifo = transport.poller.fifo(.in); + + poll: while (true) { + while (fifo.readableLength() < @sizeOf(Header)) { + if (!(if (timeout_ns) |timeout| try transport.poller.pollTimeout(timeout) else try transport.poller.poll())) break :poll; + } + const header = fifo.reader().readStructEndian(Header, .little) catch unreachable; + while (fifo.readableLength() < header.bytes_len) { + if (!(if (timeout_ns) |timeout| try transport.poller.pollTimeout(timeout) else try transport.poller.poll())) break :poll; + } + return header; + } + return error.EndOfStream; + } + + pub fn reader(transport: *Transport) std.io.PollFifo.Reader { + return transport.poller.fifo(.in).reader(); + } + + pub fn discard(transport: *Transport, bytes: usize) void { + 
transport.poller.fifo(.in).discard(bytes); + } + + pub fn receiveBytes( + transport: *Transport, + allocator: std.mem.Allocator, + len: usize, + ) (std.mem.Allocator.Error || std.fs.File.ReadError || error{EndOfStream})![]u8 { + return try transport.receiveSlice(allocator, u8, len); + } + + pub fn receiveSlice( + transport: *Transport, + allocator: std.mem.Allocator, + comptime T: type, + len: usize, + ) (std.mem.Allocator.Error || std.fs.File.ReadError || error{EndOfStream})![]T { + const bytes = try allocator.alignedAlloc(u8, @alignOf(T), len * @sizeOf(T)); + errdefer allocator.free(bytes); + const amt = try transport.reader().readAll(bytes); + if (amt != len * @sizeOf(T)) return error.EndOfStream; + const result = std.mem.bytesAsSlice(T, bytes); + std.debug.assert(result.len == len); + if (need_bswap) { + for (result) |*item| { + item.* = @byteSwap(item.*); + } + } + return result; + } + + pub fn serveMessage( + client: *const Transport, + header: Header, + bufs: []const []const u8, + ) std.fs.File.WriteError!void { + std.debug.assert(bufs.len < 10); + var iovecs: [10]std.posix.iovec_const = undefined; + var header_le = header; + if (need_bswap) std.mem.byteSwapAllFields(Header, &header_le); + const header_bytes = std.mem.asBytes(&header_le); + iovecs[0] = .{ .base = header_bytes.ptr, .len = header_bytes.len }; + for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| { + iovec.* = .{ + .base = buf.ptr, + .len = buf.len, + }; + } + try client.out.writevAll(iovecs[0 .. 
bufs.len + 1]); + } +}; diff --git a/src/translate_c.zig b/src/translate_c.zig index 233257683..7722478b5 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -7,9 +7,12 @@ const ast = @import("ast.zig"); const tracy = @import("tracy"); const Ast = std.zig.Ast; const URI = @import("uri.zig"); -const ZCS = @import("ZigCompileServer.zig"); const log = std.log.scoped(.zls_translate_c); +const ZCSTransport = @import("build_runner/shared.zig").Transport; +const OutMessage = std.zig.Client.Message; +const InMessage = std.zig.Server.Message; + /// converts a `@cInclude` node into an equivalent c header file /// which can then be handed over to `zig translate-c` /// Caller owns returned memory. @@ -195,54 +198,61 @@ pub fn translate( }; }; - var zcs = ZCS.init(.{ + var zcs = ZCSTransport.init(.{ .gpa = allocator, .in = process.stdout.?, .out = process.stdin.?, }); defer zcs.deinit(); - try zcs.serveMessage(.{ .tag = .update, .bytes_len = 0 }, &.{}); - try zcs.serveMessage(.{ .tag = .exit, .bytes_len = 0 }, &.{}); + try zcs.serveMessage(.{ .tag = @intFromEnum(OutMessage.Tag.update), .bytes_len = 0 }, &.{}); + try zcs.serveMessage(.{ .tag = @intFromEnum(OutMessage.Tag.exit), .bytes_len = 0 }, &.{}); while (true) { - const header = try zcs.receiveMessage(); + const header = try zcs.receiveMessage(20 * std.time.ns_per_s); // log.debug("received header: {}", .{header}); - switch (header.tag) { + switch (@as(InMessage.Tag, @enumFromInt(header.tag))) { .zig_version => { // log.debug("zig-version: {s}", .{zcs.receive_fifo.readableSliceOfLen(header.bytes_len)}); - zcs.pooler.fifo(.in).discard(header.bytes_len); + zcs.discard(header.bytes_len); }, .emit_digest => { - const body_size = @sizeOf(std.zig.Server.Message.EmitDigest); - if (header.bytes_len <= body_size) return error.InvalidResponse; + const expected_size: usize = @sizeOf(std.zig.Server.Message.EmitDigest) + 16; + if (header.bytes_len != expected_size) return error.InvalidResponse; - _ = try 
zcs.receiveEmitDigest(); + zcs.discard(@sizeOf(InMessage.EmitDigest)); - const trailing_size = header.bytes_len - body_size; - const bin_result_path = zcs.pooler.fifo(.in).readableSliceOfLen(trailing_size); - const hex_result_path = std.Build.Cache.binToHex(bin_result_path[0..16].*); + const bin_result_path = try zcs.reader().readBytesNoEof(16); + const hex_result_path = std.Build.Cache.binToHex(bin_result_path); const result_path = try std.fs.path.join(allocator, &.{ global_cache_path, "o", &hex_result_path, "cimport.zig" }); defer allocator.free(result_path); - return Result{ .success = try URI.fromPath(allocator, std.mem.sliceTo(result_path, '\n')) }; + return .{ .success = try URI.fromPath(allocator, std.mem.sliceTo(result_path, '\n')) }; }, .error_bundle => { - const error_bundle_header = try zcs.receiveErrorBundle(); + if (header.bytes_len < @sizeOf(InMessage.ErrorBundle)) return error.InvalidResponse; + + const error_bundle_header: InMessage.ErrorBundle = .{ + .extra_len = try zcs.reader().readInt(u32, .little), + .string_bytes_len = try zcs.reader().readInt(u32, .little), + }; + + const expected_size = @sizeOf(InMessage.ErrorBundle) + error_bundle_header.extra_len * @sizeOf(u32) + error_bundle_header.string_bytes_len; + if (header.bytes_len != expected_size) return error.InvalidResponse; - const extra = try zcs.receiveIntArray(allocator, error_bundle_header.extra_len); + const extra = try zcs.receiveSlice(allocator, u32, error_bundle_header.extra_len); errdefer allocator.free(extra); const string_bytes = try zcs.receiveBytes(allocator, error_bundle_header.string_bytes_len); errdefer allocator.free(string_bytes); - const error_bundle = std.zig.ErrorBundle{ .string_bytes = string_bytes, .extra = extra }; + const error_bundle: std.zig.ErrorBundle = .{ .string_bytes = string_bytes, .extra = extra }; - return Result{ .failure = error_bundle }; + return .{ .failure = error_bundle }; }, else => { - zcs.pooler.fifo(.in).discard(header.bytes_len); + 
zcs.discard(header.bytes_len); }, } } From 82bef301352678297012bd968ee19ea9a03c5d4b Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 26 Nov 2024 17:26:59 +0100 Subject: [PATCH 05/31] remove the current build-on-save implementation --- src/Server.zig | 58 ---------- src/features/diagnostics.zig | 211 ----------------------------------- 2 files changed, 269 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index ee682b5ed..f54cf86bb 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -69,7 +69,6 @@ workspaces: std.ArrayListUnmanaged(Workspace) = .{}, const Workspace = struct { uri: types.URI, - is_build_on_save_running: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), fn deinit(self: *Workspace, allocator: std.mem.Allocator) void { allocator.free(self.uri); @@ -942,8 +941,6 @@ pub fn updateConfiguration( result.deinit(server.document_store.allocator); } server.document_store.cimports.clearAndFree(server.document_store.allocator); - - server.scheduleBuildOnSave(); } if (server.status == .initialized) { @@ -1374,8 +1371,6 @@ fn saveDocumentHandler(server: *Server, arena: std.mem.Allocator, notification: server.document_store.invalidateBuildFile(uri); } - server.scheduleBuildOnSave(); - if (server.getAutofixMode() == .on_save) { const handle = server.document_store.getHandle(uri) orelse return; var text_edits = try server.autofix(arena, handle); @@ -1688,59 +1683,6 @@ fn selectionRangeHandler(server: *Server, arena: std.mem.Allocator, request: typ return try selection_range.generateSelectionRanges(arena, handle, request.positions, server.offset_encoding); } -fn scheduleBuildOnSave(server: *Server) void { - if (!std.process.can_spawn) return; - if (server.config.enable_build_on_save == false) return; - if (!server.client_capabilities.supports_publish_diagnostics) return; - - for (server.workspaces.items) |*workspace| { - if (zig_builtin.single_threaded) { - server.runBuildOnSave(workspace); - } else { - // TODO race-condition: `server.workspaces` 
may be modified - server.thread_pool.spawn(runBuildOnSave, .{ server, workspace }) catch { - server.runBuildOnSave(workspace); - }; - } - } -} - -fn runBuildOnSave(server: *Server, workspace: *Workspace) void { - comptime std.debug.assert(std.process.can_spawn); - - const was_build_on_save_running = workspace.is_build_on_save_running.swap(true, .acq_rel); - if (was_build_on_save_running) return; - - defer workspace.is_build_on_save_running.store(false, .release); - - var diagnostic_set: std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(types.Diagnostic)) = .{}; - - var arena_allocator = std.heap.ArenaAllocator.init(server.allocator); - defer arena_allocator.deinit(); - - diagnostics_gen.generateBuildOnSaveDiagnostics( - server.allocator, - &server.document_store, - // TODO data-race on server.config - server.config, - workspace.uri, - arena_allocator.allocator(), - &diagnostic_set, - ) catch |err| { - log.err("failed to run build on save on {s}: {}", .{ workspace.uri, err }); - return; - }; - - for (diagnostic_set.keys(), diagnostic_set.values()) |document_uri, diagnostics| { - std.debug.assert(server.client_capabilities.supports_publish_diagnostics); - const json_message = server.sendToClientNotification("textDocument/publishDiagnostics", .{ - .uri = document_uri, - .diagnostics = diagnostics.items, - }) catch return; - server.allocator.free(json_message); - } -} - const HandledRequestParams = union(enum) { initialize: types.InitializeParams, shutdown, diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index c27392b4c..83cb665b1 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -209,217 +209,6 @@ pub fn generateDiagnostics(server: *Server, arena: std.mem.Allocator, handle: *D }; } -pub fn generateBuildOnSaveDiagnostics( - allocator: std.mem.Allocator, - store: *DocumentStore, - config: Config, - workspace_uri: types.URI, - arena: std.mem.Allocator, - diagnostics: 
*std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(types.Diagnostic)), -) !void { - const tracy_zone = tracy.trace(@src()); - defer tracy_zone.end(); - comptime std.debug.assert(std.process.can_spawn); - - const zig_exe_path = config.zig_exe_path orelse return; - const zig_lib_path = config.zig_lib_path orelse return; - - const workspace_path = URI.parse(allocator, workspace_uri) catch |err| { - log.err("failed to parse invalid uri '{s}': {}", .{ workspace_uri, err }); - return; - }; - defer allocator.free(workspace_path); - - std.debug.assert(std.fs.path.isAbsolute(workspace_path)); - - const build_zig_path = try std.fs.path.join(allocator, &.{ workspace_path, "build.zig" }); - defer allocator.free(build_zig_path); - - std.fs.accessAbsolute(build_zig_path, .{}) catch |err| switch (err) { - error.FileNotFound => return, - else => |e| { - log.err("failed to load build.zig at '{s}': {}", .{ build_zig_path, e }); - return e; - }, - }; - - const build_zig_uri = try URI.fromPath(allocator, build_zig_path); - defer allocator.free(build_zig_uri); - - const base_args = &[_][]const u8{ - zig_exe_path, - "build", - "--zig-lib-dir", - zig_lib_path, - "-fno-reference-trace", - "--summary", - "none", - }; - - var argv = try std.ArrayListUnmanaged([]const u8).initCapacity(arena, base_args.len + config.build_on_save_args.len); - defer argv.deinit(arena); - argv.appendSliceAssumeCapacity(base_args); - argv.appendSliceAssumeCapacity(config.build_on_save_args); - - const has_explicit_steps = for (config.build_on_save_args) |extra_arg| { - if (!std.mem.startsWith(u8, extra_arg, "-")) break true; - } else false; - - var has_check_step: bool = false; - - blk: { - store.lock.lockShared(); - defer store.lock.unlockShared(); - const build_file = store.build_files.get(build_zig_uri) orelse break :blk; - - no_build_config: { - const build_associated_config = build_file.build_associated_config orelse break :no_build_config; - const build_options = 
build_associated_config.value.build_options orelse break :no_build_config; - - try argv.ensureUnusedCapacity(arena, build_options.len); - for (build_options) |build_option| { - argv.appendAssumeCapacity(try build_option.formatParam(arena)); - } - } - - no_check: { - if (has_explicit_steps) break :no_check; - const build_config = build_file.tryLockConfig() orelse break :no_check; - defer build_file.unlockConfig(); - for (build_config.top_level_steps) |tls| { - if (std.mem.eql(u8, tls, "check")) { - has_check_step = true; - break; - } - } - } - } - - if (!(config.enable_build_on_save orelse has_check_step)) { - return; - } - - if (has_check_step) { - std.debug.assert(!has_explicit_steps); - try argv.append(arena, "check"); - } - - const extra_args_joined = try std.mem.join(allocator, " ", argv.items[base_args.len..]); - defer allocator.free(extra_args_joined); - - log.info("Running build-on-save: {s} ({s})", .{ build_zig_uri, extra_args_joined }); - - const result = std.process.Child.run(.{ - .allocator = allocator, - .argv = argv.items, - .cwd = workspace_path, - .max_output_bytes = 16 * 1024 * 1024, - }) catch |err| { - const joined = std.mem.join(allocator, " ", argv.items) catch return; - defer allocator.free(joined); - log.err("failed zig build command:\n{s}\nerror:{}\n", .{ joined, err }); - return err; - }; - defer allocator.free(result.stdout); - defer allocator.free(result.stderr); - - switch (result.term) { - .Exited => |code| if (code == 0) return, - else => { - const joined = std.mem.join(allocator, " ", argv.items) catch return; - defer allocator.free(joined); - log.err("failed zig build command:\n{s}\nstderr:{s}\n\n", .{ joined, result.stderr }); - }, - } - - var last_diagnostic_uri: ?types.URI = null; - var last_diagnostic: ?types.Diagnostic = null; - // we don't store DiagnosticRelatedInformation in last_diagnostic instead - // its stored in last_related_diagnostics because we need an ArrayList - var last_related_diagnostics: 
std.ArrayListUnmanaged(types.DiagnosticRelatedInformation) = .{}; - - // I believe that with color off it's one diag per line; is this correct? - var line_iterator = std.mem.splitScalar(u8, result.stderr, '\n'); - - while (line_iterator.next()) |line| { - var pos_and_diag_iterator = std.mem.splitScalar(u8, line, ':'); - - const src_path = pos_and_diag_iterator.next() orelse continue; - const absolute_src_path = if (std.fs.path.isAbsolute(src_path)) src_path else blk: { - const absolute_src_path = (if (src_path.len == 1) - // it's a drive letter - std.fs.path.join(arena, &.{ line[0..2], pos_and_diag_iterator.next() orelse continue }) - else - std.fs.path.join(arena, &.{ workspace_path, src_path })) catch continue; - if (!std.fs.path.isAbsolute(absolute_src_path)) continue; - break :blk absolute_src_path; - }; - - const src_line = pos_and_diag_iterator.next() orelse continue; - const src_character = pos_and_diag_iterator.next() orelse continue; - - // TODO zig uses utf-8 encoding for character offsets - // convert them to the desired offset encoding would require loading every file that contains errors - // is there some efficient way to do this? 
- const utf8_position: types.Position = .{ - .line = (std.fmt.parseInt(u32, src_line, 10) catch continue) -| 1, - .character = std.fmt.parseInt(u32, src_character, 10) catch continue, - }; - const range: types.Range = .{ .start = utf8_position, .end = utf8_position }; - - const rest = pos_and_diag_iterator.rest(); - if (rest.len <= 1) continue; - const msg = rest[1..]; - - if (std.mem.startsWith(u8, msg, "note: ")) { - try last_related_diagnostics.append(arena, .{ - .location = .{ - .uri = try URI.fromPath(arena, absolute_src_path), - .range = range, - }, - .message = try arena.dupe(u8, msg["note: ".len..]), - }); - continue; - } - - if (last_diagnostic) |*diagnostic| { - diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(arena); - const entry = try diagnostics.getOrPutValue(arena, last_diagnostic_uri.?, .{}); - try entry.value_ptr.append(arena, diagnostic.*); - last_diagnostic_uri = null; - last_diagnostic = null; - } - - if (std.mem.startsWith(u8, msg, "error: ")) { - last_diagnostic_uri = try URI.fromPath(arena, absolute_src_path); - last_diagnostic = .{ - .range = range, - .severity = .Error, - .code = .{ .string = "zig_build" }, - .source = "zls", - .message = try arena.dupe(u8, msg["error: ".len..]), - }; - } else { - last_diagnostic_uri = try URI.fromPath(arena, absolute_src_path); - last_diagnostic = .{ - .range = range, - .severity = .Error, - .code = .{ .string = "zig_build" }, - .source = "zls", - .message = try arena.dupe(u8, msg), - }; - } - } - - if (last_diagnostic) |*diagnostic| { - diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(arena); - const entry = try diagnostics.getOrPutValue(arena, last_diagnostic_uri.?, .{}); - try entry.value_ptr.append(arena, diagnostic.*); - last_diagnostic_uri = null; - last_diagnostic = null; - } -} - /// caller owns the returned ErrorBundle pub fn getAstCheckDiagnostics(server: *Server, handle: *DocumentStore.Handle) error{OutOfMemory}!std.zig.ErrorBundle { const 
tracy_zone = tracy.trace(@src()); From f5536805925114e36ce376c1b141a9c426feccea Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 28 Nov 2024 17:21:41 +0100 Subject: [PATCH 06/31] store diagnostics in a separate container --- src/DiagnosticsCollection.zig | 391 +++++++++++++++++++++++++++ src/DocumentStore.zig | 3 + src/Server.zig | 10 +- src/features/diagnostics.zig | 393 ++++++++++++++-------------- tests/lsp_features/code_actions.zig | 13 +- 5 files changed, 590 insertions(+), 220 deletions(-) create mode 100644 src/DiagnosticsCollection.zig diff --git a/src/DiagnosticsCollection.zig b/src/DiagnosticsCollection.zig new file mode 100644 index 000000000..507702714 --- /dev/null +++ b/src/DiagnosticsCollection.zig @@ -0,0 +1,391 @@ +const std = @import("std"); +const lsp = @import("lsp"); +const offsets = @import("offsets.zig"); +const URI = @import("uri.zig"); + +allocator: std.mem.Allocator, +mutex: std.Thread.Mutex = .{}, +tag_set: std.AutoArrayHashMapUnmanaged(Tag, struct { + version: u32 = 0, + error_bundle_src_base_path: ?[]const u8 = null, + error_bundle: std.zig.ErrorBundle = std.zig.ErrorBundle.empty, + diagnostics_set: std.StringArrayHashMapUnmanaged(struct { + arena: std.heap.ArenaAllocator.State, + diagnostics: []lsp.types.Diagnostic, + }) = .{}, +}) = .{}, +outdated_files: std.StringArrayHashMapUnmanaged(void) = .{}, + +const DiagnosticsCollection = @This(); + +/// Diangostics with different tags are treated independently. +/// This enables the DiagnosticsCollection to differentiate syntax level errors from build-on-save errors. 
+/// Build on Save diagnostics have a tag that is the hash of the build step and the path to the `build.zig`
+pub const Tag = enum(u32) {
+    /// * `std.zig.Ast.parse`
+    /// * ast-check
+    /// * warn_style
+    parse,
+    /// errors from `@cImport`
+    cimport,
+    _,
+};
+
+pub fn deinit(collection: *DiagnosticsCollection) void {
+    for (collection.tag_set.values()) |*entry| {
+        entry.error_bundle.deinit(collection.allocator);
+        if (entry.error_bundle_src_base_path) |src_path| collection.allocator.free(src_path);
+        for (entry.diagnostics_set.keys(), entry.diagnostics_set.values()) |uri, lsp_diagnostic| {
+            collection.allocator.free(uri);
+            lsp_diagnostic.arena.promote(collection.allocator).deinit();
+            collection.allocator.free(lsp_diagnostic.diagnostics);
+        }
+        entry.diagnostics_set.deinit(collection.allocator);
+    }
+    collection.tag_set.deinit(collection.allocator);
+    for (collection.outdated_files.keys()) |uri| collection.allocator.free(uri);
+    collection.outdated_files.deinit(collection.allocator);
+    collection.* = undefined;
+}
+
+/// Overrides all diagnostics for the given document.
+pub fn pushLspDiagnostics( + collection: *DiagnosticsCollection, + tag: Tag, + document_uri: []const u8, + diagnostics_arena_state: std.heap.ArenaAllocator.State, + diagnostics: []lsp.types.Diagnostic, +) !void { + collection.mutex.lock(); + defer collection.mutex.unlock(); + + const gop_tag = try collection.tag_set.getOrPutValue(collection.allocator, tag, .{}); + + { + try collection.outdated_files.ensureUnusedCapacity(collection.allocator, 1); + const duped_uri = try collection.allocator.dupe(u8, document_uri); + if (collection.outdated_files.fetchPutAssumeCapacity(duped_uri, {})) |_| collection.allocator.free(duped_uri); + } + + const duped_uri = try collection.allocator.dupe(u8, document_uri); + try gop_tag.value_ptr.diagnostics_set.ensureUnusedCapacity(collection.allocator, 1); + const gop_file = gop_tag.value_ptr.diagnostics_set.getOrPutAssumeCapacity(duped_uri); + if (gop_file.found_existing) { + collection.allocator.free(duped_uri); + gop_file.value_ptr.arena.promote(collection.allocator).deinit(); + } + + gop_file.value_ptr.* = .{ + .arena = diagnostics_arena_state, + .diagnostics = diagnostics, + }; +} + +pub fn pushErrorBundle( + collection: *DiagnosticsCollection, + tag: Tag, + /// * If the `version` is greater than the old version, all diagnostics get removed and the errors from `error_bundle` get added and the `version` is updated. + /// * If the `version` is equal to the old version, the errors from `error_bundle` get added. + /// * If the `version` is less than the old version, the errors from `error_bundle` are ignored. + version: u32, + /// Used to resolve relative `std.zig.ErrorBundle.SourceLocation.src_path` + /// + /// The current implementation assumes that the base path is always the same for the same tag. 
+ src_base_path: ?[]const u8, + error_bundle: std.zig.ErrorBundle, +) !void { + var new_error_bundle: std.zig.ErrorBundle.Wip = undefined; + try new_error_bundle.init(collection.allocator); + defer new_error_bundle.deinit(); + + collection.mutex.lock(); + defer collection.mutex.unlock(); + + const gop = try collection.tag_set.getOrPutValue(collection.allocator, tag, .{}); + const version_order = std.math.order(version, gop.value_ptr.version); + + if (version_order == .lt) return; + defer gop.value_ptr.version = version; + + try collectUrisFromErrorBundle(collection.allocator, error_bundle, src_base_path, &collection.outdated_files); + if (error_bundle.errorMessageCount() != 0) { + try new_error_bundle.addBundleAsRoots(error_bundle); + } + + if (version_order == .gt) { + try collectUrisFromErrorBundle(collection.allocator, gop.value_ptr.error_bundle, src_base_path, &collection.outdated_files); + } else { + if (gop.value_ptr.error_bundle.errorMessageCount() != 0) { + try new_error_bundle.addBundleAsRoots(gop.value_ptr.error_bundle); + } + } + + var owned_error_bundle = try new_error_bundle.toOwnedBundle(""); + errdefer owned_error_bundle.deinit(collection.allocator); + + const duped_error_bundle_src_base_path = if (src_base_path) |base_path| try collection.allocator.dupe(u8, base_path) else null; + errdefer if (duped_error_bundle_src_base_path) |base_path| collection.allocator.free(base_path); + + errdefer comptime unreachable; + + gop.value_ptr.error_bundle.deinit(collection.allocator); + gop.value_ptr.error_bundle = owned_error_bundle; + + if (duped_error_bundle_src_base_path) |base_path| { + if (gop.value_ptr.error_bundle_src_base_path) |old_base_path| { + collection.allocator.free(old_base_path); + gop.value_ptr.error_bundle_src_base_path = null; + } + gop.value_ptr.error_bundle_src_base_path = base_path; + } +} + +fn collectUrisFromErrorBundle( + allocator: std.mem.Allocator, + error_bundle: std.zig.ErrorBundle, + src_base_path: ?[]const u8, + uri_set: 
*std.StringArrayHashMapUnmanaged(void), +) std.mem.Allocator.Error!void { + if (error_bundle.errorMessageCount() == 0) return; + for (error_bundle.getMessages()) |msg_index| { + const err = error_bundle.getErrorMessage(msg_index); + if (err.src_loc == .none) continue; + const src_loc = error_bundle.getSourceLocation(err.src_loc); + const src_path = error_bundle.nullTerminatedString(src_loc.src_path); + + try uri_set.ensureUnusedCapacity(allocator, 1); + const uri = try pathToUri(allocator, src_base_path, src_path) orelse continue; + if (uri_set.fetchPutAssumeCapacity(uri, {})) |_| { + allocator.free(uri); + } + } +} + +fn pathToUri(allocator: std.mem.Allocator, base_path: ?[]const u8, src_path: []const u8) error{OutOfMemory}!?[]const u8 { + if (std.fs.path.isAbsolute(src_path)) { + return try URI.fromPath(allocator, src_path); + } + const base = base_path orelse return null; + const absolute_src_path = try std.fs.path.join(allocator, &.{ base, src_path }); + defer allocator.free(absolute_src_path); + + return try URI.fromPath(allocator, absolute_src_path); +} + +pub fn publishDiagnostics( + collection: *DiagnosticsCollection, + transport: lsp.AnyTransport, + offset_encoding: offsets.Encoding, +) (std.mem.Allocator.Error || lsp.AnyTransport.WriteError)!void { + var arena_allocator = std.heap.ArenaAllocator.init(collection.allocator); + defer arena_allocator.deinit(); + + while (true) { + const json_message = blk: { + collection.mutex.lock(); + defer collection.mutex.unlock(); + + const entry = collection.outdated_files.popOrNull() orelse break; + defer collection.allocator.free(entry.key); + const document_uri = entry.key; + + _ = arena_allocator.reset(.retain_capacity); + + var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; + try collection.collectLspDiagnosticsForDocument(document_uri, offset_encoding, arena_allocator.allocator(), &diagnostics); + + const params: lsp.types.PublishDiagnosticsParams = .{ + .uri = document_uri, + .diagnostics = 
diagnostics.items, + }; + + // TODO make the diagnostics serializable without requiring the mutex to be locked + break :blk try std.fmt.allocPrint(collection.allocator, + \\{{"jsonrpc":"2.0","method":"textDocument/publishDiagnostics","params":{}}} + , .{ + std.json.fmt(params, .{ .emit_null_optional_fields = false }), + }); + }; + defer collection.allocator.free(json_message); + + try transport.writeJsonMessage(json_message); + } +} + +fn collectLspDiagnosticsForDocument( + collection: *DiagnosticsCollection, + document_uri: []const u8, + offset_encoding: offsets.Encoding, + arena: std.mem.Allocator, + diagnostics: *std.ArrayListUnmanaged(lsp.types.Diagnostic), +) error{OutOfMemory}!void { + for (collection.tag_set.values()) |entry| { + if (entry.diagnostics_set.get(document_uri)) |lsp_diangostics| { + try diagnostics.appendSlice(arena, lsp_diangostics.diagnostics); + } + + const eb = entry.error_bundle; + if (eb.errorMessageCount() == 0) continue; // `getMessages` can't be called on an empty ErrorBundle + for (eb.getMessages()) |msg_index| { + const err = eb.getErrorMessage(msg_index); + if (err.src_loc == .none) continue; + + const src_loc = eb.getSourceLocation(err.src_loc); + const src_path = eb.nullTerminatedString(src_loc.src_path); + + const uri = try pathToUri(arena, entry.error_bundle_src_base_path, src_path) orelse continue; + if (!std.mem.eql(u8, document_uri, uri)) continue; + + const src_range = errorBundleSourceLocationToRange(eb, src_loc, offset_encoding); + + const eb_notes = eb.getNotes(msg_index); + const relatedInformation = if (eb_notes.len == 0) null else blk: { + const lsp_notes = try arena.alloc(lsp.types.DiagnosticRelatedInformation, eb_notes.len); + for (lsp_notes, eb_notes) |*lsp_note, eb_note_index| { + const eb_note = eb.getErrorMessage(eb_note_index); + if (eb_note.src_loc == .none) continue; + + const note_src_loc = eb.getSourceLocation(eb_note.src_loc); + const note_src_path = eb.nullTerminatedString(note_src_loc.src_path); + const 
note_src_range = errorBundleSourceLocationToRange(eb, note_src_loc, offset_encoding); + + const note_uri = try pathToUri(arena, entry.error_bundle_src_base_path, note_src_path) orelse continue; + + lsp_note.* = .{ + .location = .{ + .uri = note_uri, + .range = note_src_range, + }, + .message = eb.nullTerminatedString(eb_note.msg), + }; + } + break :blk lsp_notes; + }; + + try diagnostics.append(arena, .{ + .range = src_range, + .severity = .Error, + .source = "zls", + .message = eb.nullTerminatedString(err.msg), + .relatedInformation = relatedInformation, + }); + } + } +} + +fn errorBundleSourceLocationToRange( + error_bundle: std.zig.ErrorBundle, + src_loc: std.zig.ErrorBundle.SourceLocation, + offset_encoding: offsets.Encoding, +) lsp.types.Range { + const source_line = error_bundle.nullTerminatedString(src_loc.source_line); + + const source_line_range_utf8: lsp.types.Range = .{ + .start = .{ .line = 0, .character = src_loc.column - (src_loc.span_main - src_loc.span_start) }, + .end = .{ .line = 0, .character = src_loc.column + (src_loc.span_end - src_loc.span_main) }, + }; + const source_line_range = offsets.convertRangeEncoding(source_line, source_line_range_utf8, .@"utf-8", offset_encoding); + + return .{ + .start = .{ .line = src_loc.line, .character = source_line_range.start.character }, + .end = .{ .line = src_loc.line, .character = source_line_range.end.character }, + }; +} + +test { + var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); + defer arena_allocator.deinit(); + + const arena = arena_allocator.allocator(); + + var collection: DiagnosticsCollection = .{ .allocator = std.testing.allocator }; + defer collection.deinit(); + + try std.testing.expectEqual(0, collection.outdated_files.count()); + + var eb1 = try createTestingErrorBundle(&.{.{ .message = "Living For The City" }}); + defer eb1.deinit(std.testing.allocator); + var eb2 = try createTestingErrorBundle(&.{.{ .message = "You Haven't Done Nothin'" }}); + defer 
eb2.deinit(std.testing.allocator); + var eb3 = try createTestingErrorBundle(&.{.{ .message = "As" }}); + defer eb3.deinit(std.testing.allocator); + + { + try collection.pushErrorBundle(.parse, 1, null, eb1); + try std.testing.expectEqual(1, collection.outdated_files.count()); + try std.testing.expectEqualStrings("file:///sample.zig", collection.outdated_files.keys()[0]); + + var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; + try collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + + try std.testing.expectEqual(1, diagnostics.items.len); + try std.testing.expectEqual(lsp.types.DiagnosticSeverity.Error, diagnostics.items[0].severity); + try std.testing.expectEqualStrings("Living For The City", diagnostics.items[0].message); + try std.testing.expectEqual(null, diagnostics.items[0].relatedInformation); + } + + { + try collection.pushErrorBundle(.parse, 0, null, eb2); + + var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; + try collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + + try std.testing.expectEqual(1, diagnostics.items.len); + try std.testing.expectEqualStrings("Living For The City", diagnostics.items[0].message); + } + + { + try collection.pushErrorBundle(.parse, 2, null, eb2); + + var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; + try collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + + try std.testing.expectEqual(1, diagnostics.items.len); + try std.testing.expectEqualStrings("You Haven't Done Nothin'", diagnostics.items[0].message); + } + + { + try collection.pushErrorBundle(.parse, 3, null, .empty); + + var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; + try collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + + try std.testing.expectEqual(0, diagnostics.items.len); + } +} + +fn 
createTestingErrorBundle(messages: []const struct { + message: []const u8, + count: u32 = 1, + source_location: struct { + src_path: []const u8, + line: u32, + column: u32, + span_start: u32, + span_main: u32, + span_end: u32, + source_line: []const u8, + } = .{ .src_path = "/sample.zig", .line = 0, .column = 0, .span_start = 0, .span_main = 0, .span_end = 0, .source_line = "" }, +}) error{OutOfMemory}!std.zig.ErrorBundle { + var eb: std.zig.ErrorBundle.Wip = undefined; + try eb.init(std.testing.allocator); + errdefer eb.deinit(); + + for (messages) |msg| { + try eb.addRootErrorMessage(.{ + .msg = try eb.addString(msg.message), + .count = msg.count, + .src_loc = try eb.addSourceLocation(.{ + .src_path = try eb.addString(msg.source_location.src_path), + .line = msg.source_location.line, + .column = msg.source_location.column, + .span_start = msg.source_location.span_start, + .span_main = msg.source_location.span_main, + .span_end = msg.source_location.span_end, + .source_line = try eb.addString(msg.source_location.source_line), + }), + }); + } + + return eb.toOwnedBundle(""); +} diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index f55d917d6..e07742131 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -174,6 +174,7 @@ pub const BuildFile = struct { /// Represents a Zig source file. 
pub const Handle = struct { uri: Uri, + version: u32 = 0, tree: Ast, /// Contains one entry for every import in the document import_uris: std.ArrayListUnmanaged(Uri) = .{}, @@ -516,6 +517,8 @@ pub const Handle = struct { self.impl.document_scope = undefined; self.impl.zir = undefined; + self.version += 1; + self.impl.lock.unlock(); self.impl.allocator.free(old_tree.source); diff --git a/src/Server.zig b/src/Server.zig index f54cf86bb..45f037a8e 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -18,6 +18,7 @@ const offsets = @import("offsets.zig"); const tracy = @import("tracy"); const diff = @import("diff.zig"); const InternPool = @import("analyser/analyser.zig").InternPool; +const DiagnosticsCollection = @import("DiagnosticsCollection.zig"); const known_folders = @import("known-folders"); const BuildRunnerVersion = @import("build_runner/BuildRunnerVersion.zig").BuildRunnerVersion; @@ -63,6 +64,7 @@ zig_ast_check_lock: std.Thread.Mutex = .{}, /// often in one session, config_arena: std.heap.ArenaAllocator.State = .{}, client_capabilities: ClientCapabilities = .{}, +diagnostics_collection: DiagnosticsCollection, workspaces: std.ArrayListUnmanaged(Workspace) = .{}, // Code was based off of https://github.com/andersfr/zig-lsp/blob/master/server.zig @@ -1788,6 +1790,7 @@ pub fn create(allocator: std.mem.Allocator) !*Server { .job_queue = std.fifo.LinearFifo(Job, .Dynamic).init(allocator), .thread_pool = undefined, // set below .wait_group = if (zig_builtin.single_threaded) {} else .{}, + .diagnostics_collection = .{ .allocator = allocator }, }; if (zig_builtin.single_threaded) { @@ -1817,6 +1820,7 @@ pub fn destroy(server: *Server) void { server.ip.deinit(server.allocator); for (server.workspaces.items) |*workspace| workspace.deinit(server.allocator); server.workspaces.deinit(server.allocator); + server.diagnostics_collection.deinit(); server.client_capabilities.deinit(server.allocator); server.config_arena.promote(server.allocator).deinit(); 
server.allocator.destroy(server); @@ -2034,11 +2038,7 @@ fn processJob(server: *Server, job: Job) void { }, .generate_diagnostics => |uri| { const handle = server.document_store.getHandle(uri) orelse return; - var arena_allocator = std.heap.ArenaAllocator.init(server.allocator); - defer arena_allocator.deinit(); - const diagnostics = diagnostics_gen.generateDiagnostics(server, arena_allocator.allocator(), handle) catch return; - const json_message = server.sendToClientNotification("textDocument/publishDiagnostics", diagnostics) catch return; - server.allocator.free(json_message); + diagnostics_gen.generateDiagnostics(server, handle) catch return; }, } } diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index 83cb665b1..da210c489 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -5,7 +5,6 @@ const Ast = std.zig.Ast; const log = std.log.scoped(.zls_diag); const Server = @import("../Server.zig"); -const Config = @import("../Config.zig"); const DocumentStore = @import("../DocumentStore.zig"); const types = @import("lsp").types; const Analyser = @import("../analysis.zig"); @@ -14,142 +13,189 @@ const offsets = @import("../offsets.zig"); const URI = @import("../uri.zig"); const code_actions = @import("code_actions.zig"); const tracy = @import("tracy"); +const DiagnosticsCollection = @import("../DiagnosticsCollection.zig"); const Zir = std.zig.Zir; -pub fn generateDiagnostics(server: *Server, arena: std.mem.Allocator, handle: *DocumentStore.Handle) error{OutOfMemory}!types.PublishDiagnosticsParams { +pub fn generateDiagnostics( + server: *Server, + handle: *DocumentStore.Handle, +) error{OutOfMemory}!void { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - std.debug.assert(server.client_capabilities.supports_publish_diagnostics); + const transport = server.transport orelse return; - const tree = handle.tree; + { + var arena_allocator = 
std.heap.ArenaAllocator.init(server.diagnostics_collection.allocator); + errdefer arena_allocator.deinit(); + const arena = arena_allocator.allocator(); - var diagnostics: std.ArrayListUnmanaged(types.Diagnostic) = .{}; + var diagnostics: std.ArrayListUnmanaged(types.Diagnostic) = .{}; - try diagnostics.ensureUnusedCapacity(arena, tree.errors.len); - for (tree.errors) |err| { - const tracy_zone2 = tracy.traceNamed(@src(), "parse"); - defer tracy_zone2.end(); + try collectParseDiagnostics(handle.tree, arena, &diagnostics, server.offset_encoding); - var buffer: std.ArrayListUnmanaged(u8) = .{}; - try tree.renderError(err, buffer.writer(arena)); + if (server.getAutofixMode() != .none and handle.tree.mode == .zig) { + try code_actions.collectAutoDiscardDiagnostics(handle.tree, arena, &diagnostics, server.offset_encoding); + } - diagnostics.appendAssumeCapacity(.{ - .range = offsets.tokenToRange(tree, err.token, server.offset_encoding), - .severity = .Error, - .code = .{ .string = @tagName(err.tag) }, - .source = "zls", - .message = try buffer.toOwnedSlice(arena), - }); + if (server.config.warn_style and handle.tree.mode == .zig) { + try collectWarnStyleDiagnostics(handle.tree, arena, &diagnostics, server.offset_encoding); + } + + if (server.config.highlight_global_var_declarations and handle.tree.mode == .zig) { + try collectGlobalVarDiagnostics(handle.tree, arena, &diagnostics, server.offset_encoding); + } + + try server.diagnostics_collection.pushLspDiagnostics(.parse, handle.uri, arena_allocator.state, diagnostics.items); } - if (tree.errors.len == 0 and tree.mode == .zig) { + if (handle.tree.errors.len == 0 and handle.tree.mode == .zig) { const tracy_zone2 = tracy.traceNamed(@src(), "ast-check"); defer tracy_zone2.end(); var error_bundle = try getAstCheckDiagnostics(server, handle); defer error_bundle.deinit(server.allocator); - var diagnostics_set: std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(types.Diagnostic)) = .{}; - try 
errorBundleToDiagnostics(error_bundle, arena, &diagnostics_set, "ast-check", server.offset_encoding); - switch (diagnostics_set.count()) { - 0 => {}, - 1 => try diagnostics.appendSlice(arena, diagnostics_set.values()[0].items), - else => unreachable, // ast-check diagnostics only affect a single file - } + + try server.diagnostics_collection.pushErrorBundle(.parse, handle.version, null, error_bundle); } - if (server.getAutofixMode() != .none and tree.mode == .zig) { - const tracy_zone2 = tracy.traceNamed(@src(), "autofix"); - defer tracy_zone2.end(); + { + var arena_allocator = std.heap.ArenaAllocator.init(server.diagnostics_collection.allocator); + errdefer arena_allocator.deinit(); + const arena = arena_allocator.allocator(); - try code_actions.collectAutoDiscardDiagnostics(tree, arena, &diagnostics, server.offset_encoding); + var diagnostics: std.ArrayListUnmanaged(types.Diagnostic) = .{}; + try collectCimportDiagnostics(&server.document_store, handle, arena, &diagnostics, server.offset_encoding); + try server.diagnostics_collection.pushLspDiagnostics(.cimport, handle.uri, arena_allocator.state, diagnostics.items); } - if (server.config.warn_style and tree.mode == .zig) { - const tracy_zone2 = tracy.traceNamed(@src(), "warn style errors"); - defer tracy_zone2.end(); + std.debug.assert(server.client_capabilities.supports_publish_diagnostics); + server.diagnostics_collection.publishDiagnostics(transport, server.offset_encoding) catch |err| { + log.err("failed to publish diagnostics: {}", .{err}); + }; +} + +fn collectParseDiagnostics( + tree: Ast, + arena: std.mem.Allocator, + diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), + offset_encoding: offsets.Encoding, +) error{OutOfMemory}!void { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); + + try diagnostics.ensureUnusedCapacity(arena, tree.errors.len); + for (tree.errors) |err| { + var buffer: std.ArrayListUnmanaged(u8) = .{}; + try tree.renderError(err, buffer.writer(arena)); + + 
diagnostics.appendAssumeCapacity(.{ + .range = offsets.tokenToRange(tree, err.token, offset_encoding), + .severity = .Error, + .code = .{ .string = @tagName(err.tag) }, + .source = "zls", + .message = try buffer.toOwnedSlice(arena), + }); + } +} + +fn collectWarnStyleDiagnostics( + tree: Ast, + arena: std.mem.Allocator, + diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), + offset_encoding: offsets.Encoding, +) error{OutOfMemory}!void { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); - var node: u32 = 0; - while (node < tree.nodes.len) : (node += 1) { - if (ast.isBuiltinCall(tree, node)) { - const builtin_token = tree.nodes.items(.main_token)[node]; - const call_name = tree.tokenSlice(builtin_token); + var node: u32 = 0; + while (node < tree.nodes.len) : (node += 1) { + if (ast.isBuiltinCall(tree, node)) { + const builtin_token = tree.nodes.items(.main_token)[node]; + const call_name = tree.tokenSlice(builtin_token); - if (!std.mem.eql(u8, call_name, "@import")) continue; + if (!std.mem.eql(u8, call_name, "@import")) continue; - var buffer: [2]Ast.Node.Index = undefined; - const params = ast.builtinCallParams(tree, node, &buffer).?; + var buffer: [2]Ast.Node.Index = undefined; + const params = ast.builtinCallParams(tree, node, &buffer).?; - if (params.len != 1) continue; + if (params.len != 1) continue; - const import_str_token = tree.nodes.items(.main_token)[params[0]]; - const import_str = tree.tokenSlice(import_str_token); + const import_str_token = tree.nodes.items(.main_token)[params[0]]; + const import_str = tree.tokenSlice(import_str_token); - if (std.mem.startsWith(u8, import_str, "\"./")) { - try diagnostics.append(arena, .{ - .range = offsets.tokenToRange(tree, import_str_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "dot_slash_import" }, - .source = "zls", - .message = "A ./ is not needed in imports", - }); - } + if (std.mem.startsWith(u8, import_str, "\"./")) { + try diagnostics.append(arena, .{ + 
.range = offsets.tokenToRange(tree, import_str_token, offset_encoding), + .severity = .Hint, + .code = .{ .string = "dot_slash_import" }, + .source = "zls", + .message = "A ./ is not needed in imports", + }); } } + } - // TODO: style warnings for types, values and declarations below root scope - if (tree.errors.len == 0) { - for (tree.rootDecls()) |decl_idx| { - const decl = tree.nodes.items(.tag)[decl_idx]; - switch (decl) { - .fn_proto, - .fn_proto_multi, - .fn_proto_one, - .fn_proto_simple, - .fn_decl, - => blk: { - var buf: [1]Ast.Node.Index = undefined; - const func = tree.fullFnProto(&buf, decl_idx).?; - if (func.extern_export_inline_token != null) break :blk; - - if (func.name_token) |name_token| { - const is_type_function = Analyser.isTypeFunction(tree, func); - - const func_name = tree.tokenSlice(name_token); - if (!is_type_function and !Analyser.isCamelCase(func_name)) { - try diagnostics.append(arena, .{ - .range = offsets.tokenToRange(tree, name_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "bad_style" }, - .source = "zls", - .message = "Functions should be camelCase", - }); - } else if (is_type_function and !Analyser.isPascalCase(func_name)) { - try diagnostics.append(arena, .{ - .range = offsets.tokenToRange(tree, name_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "bad_style" }, - .source = "zls", - .message = "Type functions should be PascalCase", - }); - } + // TODO: style warnings for types, values and declarations below root scope + if (tree.errors.len == 0) { + for (tree.rootDecls()) |decl_idx| { + const decl = tree.nodes.items(.tag)[decl_idx]; + switch (decl) { + .fn_proto, + .fn_proto_multi, + .fn_proto_one, + .fn_proto_simple, + .fn_decl, + => blk: { + var buf: [1]Ast.Node.Index = undefined; + const func = tree.fullFnProto(&buf, decl_idx).?; + if (func.extern_export_inline_token != null) break :blk; + + if (func.name_token) |name_token| { + const is_type_function = 
Analyser.isTypeFunction(tree, func); + + const func_name = tree.tokenSlice(name_token); + if (!is_type_function and !Analyser.isCamelCase(func_name)) { + try diagnostics.append(arena, .{ + .range = offsets.tokenToRange(tree, name_token, offset_encoding), + .severity = .Hint, + .code = .{ .string = "bad_style" }, + .source = "zls", + .message = "Functions should be camelCase", + }); + } else if (is_type_function and !Analyser.isPascalCase(func_name)) { + try diagnostics.append(arena, .{ + .range = offsets.tokenToRange(tree, name_token, offset_encoding), + .severity = .Hint, + .code = .{ .string = "bad_style" }, + .source = "zls", + .message = "Type functions should be PascalCase", + }); } - }, - else => {}, - } + } + }, + else => {}, } } } +} - for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| { - const tracy_zone2 = tracy.traceNamed(@src(), "cImport"); - defer tracy_zone2.end(); +fn collectCimportDiagnostics( + document_store: *DocumentStore, + handle: *DocumentStore.Handle, + arena: std.mem.Allocator, + diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), + offset_encoding: offsets.Encoding, +) error{OutOfMemory}!void { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); + for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| { const result = blk: { - server.document_store.lock.lock(); - defer server.document_store.lock.unlock(); - break :blk server.document_store.cimports.get(hash) orelse continue; + document_store.lock.lock(); + defer document_store.lock.unlock(); + break :blk document_store.cimports.get(hash) orelse continue; }; const error_bundle: std.zig.ErrorBundle = switch (result) { .success => continue, @@ -162,7 +208,7 @@ pub fn generateDiagnostics(server: *Server, arena: std.mem.Allocator, handle: *D const err_msg = error_bundle.getErrorMessage(err_msg_index); diagnostics.appendAssumeCapacity(.{ - .range = offsets.nodeToRange(tree, node, server.offset_encoding), + .range = 
offsets.nodeToRange(handle.tree, node, offset_encoding), .severity = .Error, .code = .{ .string = "cImport" }, .source = "zls", @@ -170,43 +216,43 @@ pub fn generateDiagnostics(server: *Server, arena: std.mem.Allocator, handle: *D }); } } +} - if (server.config.highlight_global_var_declarations and tree.mode == .zig) { - const tracy_zone2 = tracy.traceNamed(@src(), "highlight global var"); - defer tracy_zone2.end(); +fn collectGlobalVarDiagnostics( + tree: Ast, + arena: std.mem.Allocator, + diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), + offset_encoding: offsets.Encoding, +) error{OutOfMemory}!void { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); - const main_tokens = tree.nodes.items(.main_token); - const tags = tree.tokens.items(.tag); - for (tree.rootDecls()) |decl| { - const decl_tag = tree.nodes.items(.tag)[decl]; - const decl_main_token = tree.nodes.items(.main_token)[decl]; - - switch (decl_tag) { - .simple_var_decl, - .aligned_var_decl, - .local_var_decl, - .global_var_decl, - => { - if (tags[main_tokens[decl]] != .keyword_var) continue; // skip anything immutable - // uncomment this to get a list :) - //log.debug("possible global variable \"{s}\"", .{tree.tokenSlice(decl_main_token + 1)}); - try diagnostics.append(arena, .{ - .range = offsets.tokenToRange(tree, decl_main_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "highlight_global_var_declarations" }, - .source = "zls", - .message = "Global var declaration", - }); - }, - else => {}, - } + const main_tokens = tree.nodes.items(.main_token); + const tags = tree.tokens.items(.tag); + for (tree.rootDecls()) |decl| { + const decl_tag = tree.nodes.items(.tag)[decl]; + const decl_main_token = tree.nodes.items(.main_token)[decl]; + + switch (decl_tag) { + .simple_var_decl, + .aligned_var_decl, + .local_var_decl, + .global_var_decl, + => { + if (tags[main_tokens[decl]] != .keyword_var) continue; // skip anything immutable + // uncomment this to get a 
list :) + //log.debug("possible global variable \"{s}\"", .{tree.tokenSlice(decl_main_token + 1)}); + try diagnostics.append(arena, .{ + .range = offsets.tokenToRange(tree, decl_main_token, offset_encoding), + .severity = .Hint, + .code = .{ .string = "highlight_global_var_declarations" }, + .source = "zls", + .message = "Global var declaration", + }); + }, + else => {}, } } - - return .{ - .uri = handle.uri, - .diagnostics = diagnostics.items, - }; } /// caller owns the returned ErrorBundle @@ -217,6 +263,12 @@ pub fn getAstCheckDiagnostics(server: *Server, handle: *DocumentStore.Handle) er std.debug.assert(handle.tree.errors.len == 0); std.debug.assert(handle.tree.mode == .zig); + const file_path = URI.parse(server.allocator, handle.uri) catch |err| { + log.err("failed to parse invalid uri '{s}': {}", .{ handle.uri, err }); + return .empty; + }; + defer server.allocator.free(file_path); + var eb: std.zig.ErrorBundle.Wip = undefined; try eb.init(server.allocator); defer eb.deinit(); @@ -229,6 +281,7 @@ pub fn getAstCheckDiagnostics(server: *Server, handle: *DocumentStore.Handle) er server.allocator, server.config.zig_exe_path.?, &server.zig_ast_check_lock, + file_path, handle.tree.source, &eb, ) catch |err| { @@ -239,7 +292,7 @@ pub fn getAstCheckDiagnostics(server: *Server, handle: *DocumentStore.Handle) er std.debug.assert(handle.getZirStatus() == .done); if (zir.hasCompileErrors()) { - try eb.addZirErrorMessages(zir, handle.tree, handle.tree.source, ""); + try eb.addZirErrorMessages(zir, handle.tree, handle.tree.source, file_path); } } @@ -250,6 +303,7 @@ fn getErrorBundleFromAstCheck( allocator: std.mem.Allocator, zig_exe_path: []const u8, zig_ast_check_lock: *std.Thread.Mutex, + file_path: []const u8, source: [:0]const u8, error_bundle: *std.zig.ErrorBundle.Wip, ) !void { @@ -293,6 +347,8 @@ fn getErrorBundleFromAstCheck( var notes: std.ArrayListUnmanaged(std.zig.ErrorBundle.MessageIndex) = .{}; defer notes.deinit(allocator); + const eb_file_path = try 
error_bundle.addString(file_path); + var line_iterator = std.mem.splitScalar(u8, stderr_bytes, '\n'); while (line_iterator.next()) |line| { var pos_and_diag_iterator = std.mem.splitScalar(u8, line, ':'); @@ -319,7 +375,7 @@ fn getErrorBundleFromAstCheck( } const src_loc = try error_bundle.addSourceLocation(.{ - .src_path = 0, + .src_path = eb_file_path, .line = utf8_position.line, .column = utf8_position.character, .span_start = @intCast(loc.start), @@ -362,72 +418,3 @@ fn getErrorBundleFromAstCheck( @memcpy(error_bundle.extra.items[notes_start..][0..em.notes_len], @as([]const u32, @ptrCast(notes.items))); } } - -pub fn errorBundleToDiagnostics( - error_bundle: std.zig.ErrorBundle, - arena: std.mem.Allocator, - diagnostics: *std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(types.Diagnostic)), - code: []const u8, - /// The diagnostic's code, which usually appear in the user interface. - offset_encoding: offsets.Encoding, -) error{OutOfMemory}!void { - if (error_bundle.errorMessageCount() == 0) return; // `getMessages` can't be called on an empty ErrorBundle - for (error_bundle.getMessages()) |msg_index| { - const err = error_bundle.getErrorMessage(msg_index); - if (err.src_loc == .none) continue; - - const src_loc = error_bundle.getSourceLocation(err.src_loc); - const src_path = error_bundle.nullTerminatedString(src_loc.src_path); - const src_range = errorBundleSourceLocationToRange(error_bundle, src_loc, offset_encoding); - - const eb_notes = error_bundle.getNotes(msg_index); - const lsp_notes = try arena.alloc(types.DiagnosticRelatedInformation, eb_notes.len); - for (lsp_notes, eb_notes) |*lsp_note, eb_note_index| { - const eb_note = error_bundle.getErrorMessage(eb_note_index); - if (eb_note.src_loc == .none) continue; - - const note_src_loc = error_bundle.getSourceLocation(eb_note.src_loc); - const note_src_path = error_bundle.nullTerminatedString(src_loc.src_path); - const note_src_range = errorBundleSourceLocationToRange(error_bundle, note_src_loc, 
offset_encoding); - - lsp_note.* = .{ - .location = .{ - .uri = try URI.fromPath(arena, note_src_path), - .range = note_src_range, - }, - .message = try arena.dupe(u8, error_bundle.nullTerminatedString(eb_note.msg)), - }; - } - - const uri = try URI.fromPath(arena, src_path); - - const gop = try diagnostics.getOrPutValue(arena, uri, .{}); - try gop.value_ptr.append(arena, .{ - .range = src_range, - .severity = .Error, - .code = .{ .string = code }, - .source = "zls", - .message = try arena.dupe(u8, error_bundle.nullTerminatedString(err.msg)), - .relatedInformation = lsp_notes, - }); - } -} - -fn errorBundleSourceLocationToRange( - error_bundle: std.zig.ErrorBundle, - src_loc: std.zig.ErrorBundle.SourceLocation, - offset_encoding: offsets.Encoding, -) types.Range { - const source_line = error_bundle.nullTerminatedString(src_loc.source_line); - - const source_line_range_utf8: types.Range = .{ - .start = .{ .line = 0, .character = src_loc.column - (src_loc.span_main - src_loc.span_start) }, - .end = .{ .line = 0, .character = src_loc.column + (src_loc.span_end - src_loc.span_main) }, - }; - const source_line_range = offsets.convertRangeEncoding(source_line, source_line_range_utf8, .@"utf-8", offset_encoding); - - return .{ - .start = .{ .line = src_loc.line, .character = source_line_range.start.character }, - .end = .{ .line = src_loc.line, .character = source_line_range.end.character }, - }; -} diff --git a/tests/lsp_features/code_actions.zig b/tests/lsp_features/code_actions.zig index 4e27ea28c..a4f8245f6 100644 --- a/tests/lsp_features/code_actions.zig +++ b/tests/lsp_features/code_actions.zig @@ -879,22 +879,11 @@ fn testDiagnostic( const uri = try ctx.addDocument(.{ .source = source }); const handle = ctx.server.document_store.getHandle(uri).?; - var error_bundle = try zls.diagnostics.getAstCheckDiagnostics(ctx.server, handle); - defer error_bundle.deinit(ctx.server.allocator); - var diagnostics_set: 
std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(types.Diagnostic)) = .{}; - try zls.diagnostics.errorBundleToDiagnostics(error_bundle, ctx.arena.allocator(), &diagnostics_set, "ast-check", ctx.server.offset_encoding); - - const diagnostics: []const types.Diagnostic = switch (diagnostics_set.count()) { - 0 => &.{}, - 1 => diagnostics_set.values()[0].items, - else => unreachable, // ast-check diagnostics only affect a single file - }; - const params: types.CodeActionParams = .{ .textDocument = .{ .uri = uri }, .range = range, .context = .{ - .diagnostics = diagnostics, + .diagnostics = &.{}, .only = if (options.filter_kind) |kind| &.{kind} else null, }, }; From a1e195c46945e702440b9acfa24091acbc421eaa Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 27 Nov 2024 15:31:22 +0100 Subject: [PATCH 07/31] server error bundles through the build runner --- src/build_runner/master.zig | 102 ++++++++++++++++++++++++++++-------- src/build_runner/shared.zig | 20 +++++++ 2 files changed, 100 insertions(+), 22 deletions(-) diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index 4bf0bc238..42aac0688 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -121,6 +121,7 @@ pub fn main() !void { var seed: u32 = 0; var output_tmp_nonce: ?[16]u8 = null; var debounce_interval_ms: u16 = 50; + var watch = false; while (nextArg(args, &arg_idx)) |arg| { if (mem.startsWith(u8, arg, "-Z")) { @@ -243,8 +244,7 @@ pub fn main() !void { } else if (mem.eql(u8, arg, "--prominent-compile-errors")) { // prominent_compile_errors = true; } else if (mem.eql(u8, arg, "--watch")) { - // watch mode will always be enabled if supported - // watch = true; + watch = true; } else if (mem.eql(u8, arg, "-fwine")) { builder.enable_wine = true; } else if (mem.eql(u8, arg, "-fno-wine")) { @@ -349,6 +349,9 @@ pub fn main() !void { .thread_pool = undefined, // set below .claimed_rss = 0, + + .transport = null, + .cycle = 0, }; if (run.max_rss == 0) { @@ -359,30 
+362,44 @@ pub fn main() !void { try run.thread_pool.init(thread_pool_options); defer run.thread_pool.deinit(); - const gpa = arena; - try extractBuildInformation( - gpa, - builder, - arena, - main_progress_node, - &run, - seed, - ); + if (!watch) { + try extractBuildInformation( + arena, + builder, + arena, + main_progress_node, + &run, + seed, + ); + return; + } if (!Watch.have_impl) return; var w = try Watch.init(); + const gpa = arena; + var transport = Transport.init(.{ + .gpa = gpa, + .in = std.io.getStdIn(), + .out = std.io.getStdOut(), + }); + defer transport.deinit(); + + run.transport = &transport; + var step_stack = try stepNamesToStepStack(gpa, builder, targets.items); + if (step_stack.count() == 0) { + // This means that `enable_build_on_save == null` and the project contains no "check" step. + return; + } prepare(gpa, builder, &step_stack, &run, seed) catch |err| switch (err) { error.UncleanExit => process.exit(1), else => return err, }; - // TODO watch mode is currently always disabled until ZLS supports it - rebuild: while (false) { + rebuild: while (true) : (run.cycle += 1) { runSteps( - gpa, builder, step_stack.keys(), main_progress_node, @@ -435,6 +452,9 @@ const Run = struct { thread_pool: std.Thread.Pool, claimed_rss: usize, + + transport: ?*Transport, + cycle: u32, }; fn stepNamesToStepStack( @@ -446,8 +466,12 @@ fn stepNamesToStepStack( errdefer step_stack.deinit(gpa); if (step_names.len == 0) { - const default_step = if (b.top_level_steps.get("check")) |tls| &tls.step else b.default_step; - try step_stack.put(gpa, default_step, {}); + if (b.top_level_steps.get("check")) |tls| { + try step_stack.put(gpa, &tls.step, {}); + } else { + // TODO remove once ZLS respects `enable_build_on_save` and `build_on_save_args` + try step_stack.put(gpa, b.default_step, {}); + } } else { try step_stack.ensureUnusedCapacity(gpa, step_names.len); for (0..step_names.len) |i| { @@ -534,6 +558,16 @@ fn runSteps( &wait_group, b, step, step_prog, run, }) catch 
@panic("OOM"); } + + if (run.transport) |transport| { + for (steps) |step| { + // missing fields: + // - result_error_msgs + // - result_stderr + // TODO step_id + serveWatchErrorBundle(transport, 0, run.cycle, step.result_error_bundle) catch @panic("failed to send watch errors"); + } + } } /// Traverse the dependency graph depth-first and make it undirected by having @@ -652,12 +686,12 @@ fn workerMakeOneStep( .watch = true, }); - const show_error_msgs = s.result_error_msgs.items.len > 0; - const show_compile_errors = s.result_error_bundle.errorMessageCount() > 0; - - if (show_error_msgs or show_compile_errors) { - // s.result_stderr - // TODO send to ZLS + if (run.transport) |transport| { + // missing fields: + // - result_error_msgs + // - result_stderr + // TODO step_id + serveWatchErrorBundle(transport, 0, run.cycle, s.result_error_bundle) catch @panic("failed to send watch errors"); } handle_result: { @@ -782,6 +816,7 @@ fn validateSystemLibraryOptions(b: *std.Build) void { // const shared = @import("shared.zig"); +const Transport = shared.Transport; const BuildConfig = shared.BuildConfig; const Packages = struct { @@ -1282,3 +1317,26 @@ const copied_from_zig = struct { } } }; + +fn serveWatchErrorBundle( + transport: *Transport, + step_id: u32, + cycle: u32, + error_bundle: std.zig.ErrorBundle, +) !void { + const eb_hdr: shared.ServerToClient.ErrorBundle = .{ + .step_id = step_id, + .cycle = cycle, + .extra_len = @intCast(error_bundle.extra.len), + .string_bytes_len = @intCast(error_bundle.string_bytes.len), + }; + const bytes_len = @sizeOf(shared.ServerToClient.ErrorBundle) + 4 * error_bundle.extra.len + error_bundle.string_bytes.len; + try transport.serveMessage(.{ + .tag = @intFromEnum(shared.ServerToClient.Tag.watch_error_bundle), + .bytes_len = @intCast(bytes_len), + }, &.{ + std.mem.asBytes(&eb_hdr), + std.mem.sliceAsBytes(error_bundle.extra), + error_bundle.string_bytes, + }); +} diff --git a/src/build_runner/shared.zig b/src/build_runner/shared.zig 
index 7aa69b4f5..18b9d64e4 100644 --- a/src/build_runner/shared.zig +++ b/src/build_runner/shared.zig @@ -124,3 +124,23 @@ pub const Transport = struct { try client.out.writevAll(iovecs[0 .. bufs.len + 1]); } }; + +pub const ServerToClient = struct { + pub const Tag = enum(u32) { + /// Body is an ErrorBundle. + watch_error_bundle, + + _, + }; + + /// Trailing: + /// * extra: [extra_len]u32, + /// * string_bytes: [string_bytes_len]u8, + /// See `std.zig.ErrorBundle`. + pub const ErrorBundle = extern struct { + step_id: u32, + cycle: u32, + extra_len: u32, + string_bytes_len: u32, + }; +}; From 51d93d7838e3ca4c2109f3d637444bed455dce86 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 28 Nov 2024 17:18:50 +0100 Subject: [PATCH 08/31] add missig fincremental flag to build runner --- src/build_runner/master.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index 42aac0688..0d49232cf 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -245,6 +245,10 @@ pub fn main() !void { // prominent_compile_errors = true; } else if (mem.eql(u8, arg, "--watch")) { watch = true; + } else if (mem.eql(u8, arg, "-fincremental")) { + graph.incremental = true; + } else if (mem.eql(u8, arg, "-fno-incremental")) { + graph.incremental = false; } else if (mem.eql(u8, arg, "-fwine")) { builder.enable_wine = true; } else if (mem.eql(u8, arg, "-fno-wine")) { From 64bc566722481a17ae891dff7d8ed13b26873651 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 28 Nov 2024 17:19:23 +0100 Subject: [PATCH 09/31] remove bad call to std.process.Child.kill --- src/translate_c.zig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/translate_c.zig b/src/translate_c.zig index 7722478b5..bc26ea7f9 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -191,11 +191,8 @@ pub fn translate( return null; }; - defer _ = process.wait() catch |wait_err| blk: { + defer _ = process.wait() catch 
|wait_err| { log.err("zig translate-c process did not terminate, error: {}", .{wait_err}); - break :blk process.kill() catch |kill_err| { - std.debug.panic("failed to terminate zig translate-c process, error: {}", .{kill_err}); - }; }; var zcs = ZCSTransport.init(.{ From cd148cc82d32ed15ef3ecbd17d32b79247c51acb Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 28 Nov 2024 17:21:21 +0100 Subject: [PATCH 10/31] add a "check" step --- build.zig | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/build.zig b/build.zig index aab9f25ad..108a8381b 100644 --- a/build.zig +++ b/build.zig @@ -179,6 +179,25 @@ pub fn build(b: *Build) !void { exe.root_module.addImport("zls", zls_module); b.installArtifact(exe); + { + const exe_check = b.addExecutable(.{ + .name = "zls", + .root_source_file = b.path("src/main.zig"), + .target = target, + .optimize = optimize, + .single_threaded = single_threaded, + }); + exe_check.root_module.addImport("exe_options", exe_options_module); + exe_check.root_module.addImport("tracy", tracy_module); + exe_check.root_module.addImport("diffz", diffz_module); + exe_check.root_module.addImport("lsp", lsp_module); + exe_check.root_module.addImport("known-folders", known_folders_module); + exe_check.root_module.addImport("zls", zls_module); + + const check = b.step("check", "Check if ZLS compiles"); + check.dependOn(&exe_check.step); + } + const test_step = b.step("test", "Run all the tests"); const tests = b.addTest(.{ From fcc2a3ca16d95a0b561abe1a6dbb26b5540eb5e3 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 28 Nov 2024 17:25:43 +0100 Subject: [PATCH 11/31] improve some configuration log messages --- src/Server.zig | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index 45f037a8e..856ed54df 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -877,18 +877,9 @@ pub fn updateConfiguration( // apply changes // 
<----------------------------------------------------------> - const new_zig_exe_path = - new_config.zig_exe_path != null and - (server.config.zig_exe_path == null or !std.mem.eql(u8, server.config.zig_exe_path.?, new_config.zig_exe_path.?)); - const new_zig_lib_path = - new_config.zig_lib_path != null and - (server.config.zig_lib_path == null or !std.mem.eql(u8, server.config.zig_lib_path.?, new_config.zig_lib_path.?)); - const new_build_runner_path = - new_config.build_runner_path != null and - (server.config.build_runner_path == null or !std.mem.eql(u8, server.config.build_runner_path.?, new_config.build_runner_path.?)); - const new_force_autofix = new_config.force_autofix != null and server.config.force_autofix != new_config.force_autofix.?; + var has_changed: [std.meta.fields(Config).len]bool = .{false} ** std.meta.fields(Config).len; - inline for (std.meta.fields(Config)) |field| { + inline for (std.meta.fields(Config), 0..) |field, field_index| { if (@field(new_config, field.name)) |new_value| { const old_value_maybe_optional = @field(server.config, field.name); @@ -915,6 +906,7 @@ pub fn updateConfiguration( var runtime_known_field_name: []const u8 = ""; // avoid unnecessary function instantiations of `std.fmt.format` runtime_known_field_name = field.name; log.info("Set config option '{s}' to {}", .{ runtime_known_field_name, std.json.fmt(new_value, .{}) }); + has_changed[field_index] = true; @field(server.config, field.name) = switch (@TypeOf(new_value)) { []const []const u8 => blk: { const copy = try config_arena.alloc([]const u8, new_value.len); @@ -928,6 +920,11 @@ pub fn updateConfiguration( } } + const new_zig_exe_path = has_changed[std.meta.fieldIndex(Config, "zig_exe_path").?]; + const new_zig_lib_path = has_changed[std.meta.fieldIndex(Config, "zig_lib_path").?]; + const new_build_runner_path = has_changed[std.meta.fieldIndex(Config, "build_runner_path").?]; + const new_force_autofix = has_changed[std.meta.fieldIndex(Config, "force_autofix").?]; + 
server.document_store.config = DocumentStore.Config.fromMainConfig(server.config); if (new_zig_exe_path or new_build_runner_path) blk: { @@ -965,9 +962,11 @@ pub fn updateConfiguration( // don't modify config options after here, only show messages // <----------------------------------------------------------> + // TODO there should a way to suppress this message if (std.process.can_spawn and server.status == .initialized and server.config.zig_exe_path == null) { - // TODO there should a way to suppress this message server.showMessage(.Warning, "zig executable could not be found", .{}); + } else if (std.process.can_spawn and server.status == .initialized and server.config.zig_lib_path == null) { + server.showMessage(.Warning, "zig standard library directory could not be resolved", .{}); } switch (resolve_result.build_runner_version) { @@ -1007,17 +1006,19 @@ pub fn updateConfiguration( if (!std.process.can_spawn) { log.info("'prefer_ast_check_as_child_process' is ignored because your OS can't spawn a child process", .{}); } else if (server.status == .initialized and server.config.zig_exe_path == null) { - log.info("'prefer_ast_check_as_child_process' is ignored because Zig could not be found", .{}); + log.warn("'prefer_ast_check_as_child_process' is ignored because Zig could not be found", .{}); } } if (server.config.enable_build_on_save orelse false) { if (!std.process.can_spawn) { log.info("'enable_build_on_save' is ignored because your OS can't spawn a child process", .{}); - } else if (server.status == .initialized and server.config.zig_exe_path == null) { - log.info("'enable_build_on_save' is ignored because Zig could not be found", .{}); + } else if (server.status == .initialized and (server.config.zig_exe_path == null or server.config.zig_lib_path == null)) { + log.warn("'enable_build_on_save' is ignored because Zig could not be found", .{}); } else if (!server.client_capabilities.supports_publish_diagnostics) { - log.info("'enable_build_on_save' is ignored 
because it is not supported by {s}", .{server.client_capabilities.client_name orelse "your editor"}); + log.warn("'enable_build_on_save' is ignored because it is not supported by {s}", .{server.client_capabilities.client_name orelse "your editor"}); + } else if (server.status == .initialized and resolve_result.build_runner_version != .resolved) { + log.warn("'enable_build_on_save' is ignored because no build runner is available", .{}); } } From bdfd1448e355e8e48c6db88b63e7d3b4a3f6164f Mon Sep 17 00:00:00 2001 From: Techatrix Date: Fri, 29 Nov 2024 15:44:55 +0100 Subject: [PATCH 12/31] implement build-on-save with --watch --- src/Server.zig | 95 ++++++++++++++--- src/build_runner/master.zig | 17 ++- src/features/diagnostics.zig | 196 ++++++++++++++++++++++++++++++++++- 3 files changed, 292 insertions(+), 16 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index 856ed54df..1428f72f5 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -69,14 +69,6 @@ workspaces: std.ArrayListUnmanaged(Workspace) = .{}, // Code was based off of https://github.com/andersfr/zig-lsp/blob/master/server.zig -const Workspace = struct { - uri: types.URI, - - fn deinit(self: *Workspace, allocator: std.mem.Allocator) void { - allocator.free(self.uri); - } -}; - const ClientCapabilities = struct { supports_snippets: bool = false, supports_apply_edits: bool = false, @@ -769,11 +761,76 @@ fn handleConfiguration(server: *Server, json: std.json.Value) error{OutOfMemory} }; } -fn addWorkspace(server: *Server, uri: types.URI) !void { +const Workspace = struct { + uri: types.URI, + build_on_save: if (build_on_save_supported) ?diagnostics_gen.BuildOnSave else void, + + pub const build_on_save_supported = std.process.can_spawn and !zig_builtin.single_threaded; + + fn init(server: *Server, uri: types.URI) error{OutOfMemory}!Workspace { + const duped_uri = try server.allocator.dupe(u8, uri); + errdefer server.allocator.free(duped_uri); + + return .{ + .uri = duped_uri, + .build_on_save = if 
(build_on_save_supported) null else {}, + }; + } + + fn deinit(workspace: *Workspace, allocator: std.mem.Allocator) void { + if (build_on_save_supported) { + if (workspace.build_on_save) |*build_on_save| build_on_save.deinit(); + } + allocator.free(workspace.uri); + } + + fn startOrRestartBuildOnSave(workspace: *Workspace, server: *Server) error{OutOfMemory}!void { + comptime std.debug.assert(build_on_save_supported); + + if (workspace.build_on_save) |*build_on_save| { + build_on_save.deinit(); + workspace.build_on_save = null; + } + + const enable_build_on_save = server.config.enable_build_on_save orelse true; + if (!enable_build_on_save) return; + + const zig_exe_path = server.config.zig_exe_path orelse return; + const zig_lib_path = server.config.zig_lib_path orelse return; + const build_runner_path = server.config.build_runner_path orelse return; + const transport = server.transport orelse return; + + const workspace_path = @import("uri.zig").parse(server.allocator, workspace.uri) catch |err| { + log.err("failed to parse URI '{s}': {}", .{ workspace.uri, err }); + return; + }; + defer server.allocator.free(workspace_path); + + workspace.build_on_save = @as(diagnostics_gen.BuildOnSave, undefined); + workspace.build_on_save.?.init(.{ + .allocator = server.allocator, + .workspace_path = workspace_path, + .build_on_save_args = server.config.build_on_save_args, + .check_step_only = server.config.enable_build_on_save == null, + .zig_exe_path = zig_exe_path, + .zig_lib_path = zig_lib_path, + .build_runner_path = build_runner_path, + .collection = &server.diagnostics_collection, + .lsp_transport = transport, + .offset_encoding = server.offset_encoding, + }) catch |err| { + workspace.build_on_save = null; + log.err("failed to initilize Build-On-Save for '{s}': {}", .{ workspace.uri, err }); + return; + }; + + log.info("started Build-On-Save for '{s}'", .{workspace.uri}); + } +}; + +fn addWorkspace(server: *Server, uri: types.URI) error{OutOfMemory}!void { try 
server.workspaces.ensureUnusedCapacity(server.allocator, 1); - server.workspaces.appendAssumeCapacity(.{ - .uri = try server.allocator.dupe(u8, uri), - }); + server.workspaces.appendAssumeCapacity(try Workspace.init(server, uri)); log.info("added Workspace Folder: {s}", .{uri}); } @@ -923,6 +980,8 @@ pub fn updateConfiguration( const new_zig_exe_path = has_changed[std.meta.fieldIndex(Config, "zig_exe_path").?]; const new_zig_lib_path = has_changed[std.meta.fieldIndex(Config, "zig_lib_path").?]; const new_build_runner_path = has_changed[std.meta.fieldIndex(Config, "build_runner_path").?]; + const new_enable_build_on_save = has_changed[std.meta.fieldIndex(Config, "enable_build_on_save").?]; + const new_build_on_save_args = has_changed[std.meta.fieldIndex(Config, "build_on_save_args").?]; const new_force_autofix = has_changed[std.meta.fieldIndex(Config, "force_autofix").?]; server.document_store.config = DocumentStore.Config.fromMainConfig(server.config); @@ -935,6 +994,18 @@ pub fn updateConfiguration( } } + if (Workspace.build_on_save_supported and + (new_zig_exe_path or + new_zig_lib_path or + new_build_runner_path or + new_enable_build_on_save or + new_build_on_save_args)) + { + for (server.workspaces.items) |*workspace| { + try workspace.startOrRestartBuildOnSave(server); + } + } + if (new_zig_exe_path or new_zig_lib_path) { for (server.document_store.cimports.values()) |*result| { result.deinit(server.document_store.allocator); diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index 0d49232cf..2046922d8 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -122,6 +122,7 @@ pub fn main() !void { var output_tmp_nonce: ?[16]u8 = null; var debounce_interval_ms: u16 = 50; var watch = false; + var check_step_only = false; while (nextArg(args, &arg_idx)) |arg| { if (mem.startsWith(u8, arg, "-Z")) { @@ -245,6 +246,8 @@ pub fn main() !void { // prominent_compile_errors = true; } else if (mem.eql(u8, arg, "--watch")) { watch = 
true; + } else if (mem.eql(u8, arg, "--check-only")) { // ZLS only + check_step_only = true; } else if (mem.eql(u8, arg, "-fincremental")) { graph.incremental = true; } else if (mem.eql(u8, arg, "-fno-incremental")) { @@ -391,7 +394,7 @@ pub fn main() !void { run.transport = &transport; - var step_stack = try stepNamesToStepStack(gpa, builder, targets.items); + var step_stack = try stepNamesToStepStack(gpa, builder, targets.items, check_step_only); if (step_stack.count() == 0) { // This means that `enable_build_on_save == null` and the project contains no "check" step. return; @@ -402,6 +405,14 @@ pub fn main() !void { else => return err, }; + const suicide_thread = try std.Thread.spawn(.{ .allocator = gpa }, struct { + fn do(t: *Transport) void { + const header = t.receiveMessage(null) catch process.exit(1); + process.exit(if (header.tag == 0) 0 else 1); + } + }.do, .{&transport}); + suicide_thread.detach(); + rebuild: while (true) : (run.cycle += 1) { runSteps( builder, @@ -465,6 +476,7 @@ fn stepNamesToStepStack( gpa: Allocator, b: *std.Build, step_names: []const []const u8, + check_step_only: bool, ) !std.AutoArrayHashMapUnmanaged(*Step, void) { var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; errdefer step_stack.deinit(gpa); @@ -472,8 +484,7 @@ fn stepNamesToStepStack( if (step_names.len == 0) { if (b.top_level_steps.get("check")) |tls| { try step_stack.put(gpa, &tls.step, {}); - } else { - // TODO remove once ZLS respects `enable_build_on_save` and `build_on_save_args` + } else if (!check_step_only) { try step_stack.put(gpa, b.default_step, {}); } } else { diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index da210c489..69a3d5db4 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -6,7 +6,8 @@ const log = std.log.scoped(.zls_diag); const Server = @import("../Server.zig"); const DocumentStore = @import("../DocumentStore.zig"); -const types = @import("lsp").types; +const lsp = @import("lsp"); 
+const types = lsp.types;
 const Analyser = @import("../analysis.zig");
 const ast = @import("../ast.zig");
 const offsets = @import("../offsets.zig");
@@ -418,3 +419,196 @@ fn getErrorBundleFromAstCheck(
         @memcpy(error_bundle.extra.items[notes_start..][0..em.notes_len], @as([]const u32, @ptrCast(notes.items)));
     }
 }
+
+/// This struct is not relocatable after initialization.
+pub const BuildOnSave = struct {
+    allocator: std.mem.Allocator,
+    child_process: std.process.Child,
+    thread: std.Thread,
+
+    const shared = @import("../build_runner/shared.zig");
+    const Transport = shared.Transport;
+    const ServerToClient = shared.ServerToClient;
+
+    pub const InitOptions = struct {
+        allocator: std.mem.Allocator,
+        workspace_path: []const u8,
+        build_on_save_args: []const []const u8,
+        check_step_only: bool,
+        zig_exe_path: []const u8,
+        zig_lib_path: []const u8,
+        build_runner_path: []const u8,
+
+        collection: *DiagnosticsCollection,
+        lsp_transport: lsp.AnyTransport,
+        offset_encoding: offsets.Encoding,
+    };
+
+    pub fn init(self: *BuildOnSave, options: InitOptions) !void {
+        self.* = undefined;
+
+        const base_args: []const []const u8 = &.{
+            options.zig_exe_path,
+            "build",
+            "--build-runner",
+            options.build_runner_path,
+            "--zig-lib-dir",
+            options.zig_lib_path,
+            "--watch",
+        };
+        var argv = try std.ArrayListUnmanaged([]const u8).initCapacity(
+            options.allocator,
+            base_args.len + options.build_on_save_args.len + @intFromBool(options.check_step_only),
+        );
+        defer argv.deinit(options.allocator);
+
+        argv.appendSliceAssumeCapacity(base_args);
+        if (options.check_step_only) argv.appendAssumeCapacity("--check-only");
+        argv.appendSliceAssumeCapacity(options.build_on_save_args);
+
+        var child_process = std.process.Child.init(argv.items, options.allocator);
+        child_process.stdin_behavior = .Pipe;
+        child_process.stdout_behavior = .Pipe;
+        child_process.stderr_behavior = .Pipe;
+        child_process.cwd = options.workspace_path;
+
+        child_process.spawn() catch |err| {
+            log.err("failed to spawn zig build process: {}", .{err});
+            return;
+        };
+
+        errdefer _ = child_process.kill() catch |err| {
+            std.debug.panic("failed to terminate build runner process, error: {}", .{err});
+        };
+
+        self.* = .{
+            .allocator = options.allocator,
+            .child_process = child_process,
+            .thread = undefined, // set below
+        };
+
+        const duped_workspace_path = try options.allocator.dupe(u8, options.workspace_path);
+        errdefer options.allocator.free(duped_workspace_path);
+
+        self.thread = try std.Thread.spawn(.{ .allocator = options.allocator }, loop, .{
+            self,
+            options.collection,
+            options.lsp_transport,
+            options.offset_encoding,
+            duped_workspace_path,
+        });
+    }
+
+    pub fn deinit(self: *BuildOnSave) void {
+        var transport = Transport.init(.{
+            .gpa = self.allocator,
+            .in = self.child_process.stdout.?,
+            .out = self.child_process.stdin.?,
+        });
+
+        transport.serveMessage(.{ .tag = 0, .bytes_len = 0 }, &.{}) catch |err| {
+            log.warn("failed to send message to zig build runner: {}", .{err});
+            return;
+        };
+
+        const stderr_file = self.child_process.stderr.?;
+        const stderr = stderr_file.readToEndAlloc(self.allocator, 16 * 1024 * 1024) catch "";
+        defer self.allocator.free(stderr);
+
+        const term = self.child_process.wait() catch |err| {
+            log.warn("Failed to await zig build runner: {}", .{err});
+            return;
+        };
+
+        switch (term) {
+            .Exited => |code| if (code != 0) {
+                if (stderr.len != 0) {
+                    log.warn("zig build runner exited with non-zero status: {}\nstderr:\n{s}", .{ code, stderr });
+                } else {
+                    log.warn("zig build runner exited with non-zero status: {}", .{code});
+                }
+            },
+            else => {
+                if (stderr.len != 0) {
+                    log.warn("zig build runner exited abnormally: {s}\nstderr:\n{s}", .{ @tagName(term), stderr });
+                } else {
+                    log.warn("zig build runner exited abnormally: {s}", .{@tagName(term)});
+                }
+            },
+        }
+
+        self.thread.join();
+        self.* = undefined;
+    }
+
+    fn loop(
+        self: *BuildOnSave,
+        collection: *DiagnosticsCollection,
+        lsp_transport: 
lsp.AnyTransport, + offset_encoding: offsets.Encoding, + workspace_path: []const u8, + ) void { + defer self.allocator.free(workspace_path); + + var transport = Transport.init(.{ + .gpa = self.allocator, + .in = self.child_process.stdout.?, + .out = self.child_process.stdin.?, + }); + defer transport.deinit(); + + while (true) { + const header = transport.receiveMessage(null) catch |err| { + log.err("failed to receive message from build runner: {}", .{err}); + return; + }; + + switch (@as(ServerToClient.Tag, @enumFromInt(header.tag))) { + .watch_error_bundle => { + handleWatchErrorBundle( + self, + &transport, + collection, + lsp_transport, + offset_encoding, + workspace_path, + ) catch |err| { + log.err("failed to handle error bundle message from build runner: {}", .{err}); + return; + }; + }, + else => |tag| { + log.warn("received unexpected message from build runner: {}", .{tag}); + }, + } + } + } + + fn handleWatchErrorBundle( + self: *BuildOnSave, + transport: *Transport, + collection: *DiagnosticsCollection, + lsp_transport: lsp.AnyTransport, + offset_encoding: offsets.Encoding, + workspace_path: []const u8, + ) !void { + const header = try transport.reader().readStructEndian(ServerToClient.ErrorBundle, .little); + + const extra = try transport.receiveSlice(self.allocator, u32, header.extra_len); + defer self.allocator.free(extra); + + const string_bytes = try transport.receiveBytes(self.allocator, header.string_bytes_len); + defer self.allocator.free(string_bytes); + + const error_bundle: std.zig.ErrorBundle = .{ .string_bytes = string_bytes, .extra = extra }; + + var hasher = std.hash.Wyhash.init(0); + hasher.update(workspace_path); + std.hash.autoHash(&hasher, header.step_id); + + const diagnostic_tag: DiagnosticsCollection.Tag = @enumFromInt(@as(u32, @truncate(hasher.final()))); + + try collection.pushErrorBundle(diagnostic_tag, header.cycle, workspace_path, error_bundle); + try collection.publishDiagnostics(lsp_transport, offset_encoding); + } +}; From 
050ca30311b0e446b261d73265257049e0bf123f Mon Sep 17 00:00:00 2001 From: Techatrix Date: Fri, 29 Nov 2024 15:50:33 +0100 Subject: [PATCH 13/31] check whether the operating system supports --watch --- src/Server.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Server.zig b/src/Server.zig index 1428f72f5..c085c9740 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -765,7 +765,10 @@ const Workspace = struct { uri: types.URI, build_on_save: if (build_on_save_supported) ?diagnostics_gen.BuildOnSave else void, - pub const build_on_save_supported = std.process.can_spawn and !zig_builtin.single_threaded; + pub const build_on_save_supported = + std.process.can_spawn and + !zig_builtin.single_threaded and + std.Build.Watch.have_impl; fn init(server: *Server, uri: types.URI) error{OutOfMemory}!Workspace { const duped_uri = try server.allocator.dupe(u8, uri); From a3f37335d547cbdbbbfc6cc3dff5cca34bd85c77 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Fri, 29 Nov 2024 17:31:47 +0100 Subject: [PATCH 14/31] defer build-on-save initialization until after workspace configuration has been received --- src/Server.zig | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/Server.zig b/src/Server.zig index c085c9740..8f19733e2 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -649,8 +649,15 @@ fn initializedHandler(server: *Server, _: std.mem.Allocator, notification: types try server.registerCapability("workspace/didChangeConfiguration"); } - if (server.client_capabilities.supports_configuration) + if (server.client_capabilities.supports_configuration) { try server.requestConfiguration(); + } else { + if (Workspace.build_on_save_supported) { + for (server.workspaces.items) |*workspace| { + try workspace.startOrRestartBuildOnSave(server); + } + } + } if (std.crypto.random.intRangeLessThan(usize, 0, 32768) == 0) { server.showMessage(.Warning, "HELP ME, I AM STUCK INSIDE AN LSP!", .{}); @@ -998,6 +1005,7 @@ pub fn 
updateConfiguration( } if (Workspace.build_on_save_supported and + server.status == .initialized and (new_zig_exe_path or new_zig_lib_path or new_build_runner_path or From b6c195a372f4a7d9665b39d3c3ab11dbf0a67cde Mon Sep 17 00:00:00 2001 From: Techatrix Date: Fri, 29 Nov 2024 17:32:44 +0100 Subject: [PATCH 15/31] windows or whatever --- src/build_runner/master.zig | 16 ++++++++-------- src/features/diagnostics.zig | 18 ++++++++---------- 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index 2046922d8..7510be792 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -381,6 +381,14 @@ pub fn main() !void { return; } + const suicide_thread = try std.Thread.spawn(.{}, struct { + fn do() void { + _ = std.io.getStdIn().reader().readByte() catch process.exit(1); + process.exit(0); + } + }.do, .{}); + suicide_thread.detach(); + if (!Watch.have_impl) return; var w = try Watch.init(); @@ -405,14 +413,6 @@ pub fn main() !void { else => return err, }; - const suicide_thread = try std.Thread.spawn(.{ .allocator = gpa }, struct { - fn do(t: *Transport) void { - const header = t.receiveMessage(null) catch process.exit(1); - process.exit(if (header.tag == 0) 0 else 1); - } - }.do, .{&transport}); - suicide_thread.detach(); - rebuild: while (true) : (run.cycle += 1) { runSteps( builder, diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index 69a3d5db4..982a5164d 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -500,13 +500,8 @@ pub const BuildOnSave = struct { } pub fn deinit(self: *BuildOnSave) void { - var transport = Transport.init(.{ - .gpa = self.allocator, - .in = self.child_process.stdout.?, - .out = self.child_process.stdin.?, - }); - - transport.serveMessage(.{ .tag = 0, .bytes_len = 0 }, &.{}) catch |err| { + // this write tells the child process to exit + self.child_process.stdin.?.writeAll("\xaa") catch |err| { 
log.warn("failed to send message to zig build runner: {}", .{err}); return; }; @@ -558,9 +553,12 @@ pub const BuildOnSave = struct { defer transport.deinit(); while (true) { - const header = transport.receiveMessage(null) catch |err| { - log.err("failed to receive message from build runner: {}", .{err}); - return; + const header = transport.receiveMessage(null) catch |err| switch (err) { + error.EndOfStream => return, + else => { + log.err("failed to receive message from build runner: {}", .{err}); + return; + }, }; switch (@as(ServerToClient.Tag, @enumFromInt(header.tag))) { From f90926264d4fe98bd66b33bfe5f7c24a6383e405 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Sun, 1 Dec 2024 16:50:43 +0100 Subject: [PATCH 16/31] publish cImport diagnostics using error bundles This also fixes a minor bug where cImport diagnostics would only be reported after a document has been modified --- src/DiagnosticsCollection.zig | 17 ++++----- src/DocumentStore.zig | 65 +++++++++++++++++++++++++++++++++++ src/Server.zig | 11 ++++-- src/features/diagnostics.zig | 63 ++------------------------------- src/main.zig | 2 +- src/translate_c.zig | 2 +- 6 files changed, 86 insertions(+), 74 deletions(-) diff --git a/src/DiagnosticsCollection.zig b/src/DiagnosticsCollection.zig index 507702714..1e9584b5e 100644 --- a/src/DiagnosticsCollection.zig +++ b/src/DiagnosticsCollection.zig @@ -15,6 +15,8 @@ tag_set: std.AutoArrayHashMapUnmanaged(Tag, struct { }) = .{}, }) = .{}, outdated_files: std.StringArrayHashMapUnmanaged(void) = .{}, +transport: ?lsp.AnyTransport = null, +offset_encoding: ?offsets.Encoding = null, const DiagnosticsCollection = @This(); @@ -55,7 +57,7 @@ pub fn pushLspDiagnostics( document_uri: []const u8, diagnostics_arena_state: std.heap.ArenaAllocator.State, diagnostics: []lsp.types.Diagnostic, -) !void { +) error{OutOfMemory}!void { collection.mutex.lock(); defer collection.mutex.unlock(); @@ -93,7 +95,7 @@ pub fn pushErrorBundle( /// The current implementation assumes that 
the base path is always the same for the same tag. src_base_path: ?[]const u8, error_bundle: std.zig.ErrorBundle, -) !void { +) error{OutOfMemory}!void { var new_error_bundle: std.zig.ErrorBundle.Wip = undefined; try new_error_bundle.init(collection.allocator); defer new_error_bundle.deinit(); @@ -145,7 +147,7 @@ fn collectUrisFromErrorBundle( error_bundle: std.zig.ErrorBundle, src_base_path: ?[]const u8, uri_set: *std.StringArrayHashMapUnmanaged(void), -) std.mem.Allocator.Error!void { +) error{OutOfMemory}!void { if (error_bundle.errorMessageCount() == 0) return; for (error_bundle.getMessages()) |msg_index| { const err = error_bundle.getErrorMessage(msg_index); @@ -172,11 +174,10 @@ fn pathToUri(allocator: std.mem.Allocator, base_path: ?[]const u8, src_path: []c return try URI.fromPath(allocator, absolute_src_path); } -pub fn publishDiagnostics( - collection: *DiagnosticsCollection, - transport: lsp.AnyTransport, - offset_encoding: offsets.Encoding, -) (std.mem.Allocator.Error || lsp.AnyTransport.WriteError)!void { +pub fn publishDiagnostics(collection: *DiagnosticsCollection) (std.mem.Allocator.Error || lsp.AnyTransport.WriteError)!void { + const transport = collection.transport orelse return; + const offset_encoding = collection.offset_encoding orelse return; + var arena_allocator = std.heap.ArenaAllocator.init(collection.allocator); defer arena_allocator.deinit(); diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index e07742131..2bf71da27 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -14,6 +14,7 @@ const translate_c = @import("translate_c.zig"); const AstGen = std.zig.AstGen; const Zir = std.zig.Zir; const DocumentScope = @import("DocumentScope.zig"); +const DiagnosticsCollection = @import("DiagnosticsCollection.zig"); const DocumentStore = @This(); @@ -25,6 +26,7 @@ thread_pool: if (builtin.single_threaded) void else *std.Thread.Pool, handles: std.StringArrayHashMapUnmanaged(*Handle) = .{}, build_files: 
std.StringArrayHashMapUnmanaged(*BuildFile) = .{}, cimports: std.AutoArrayHashMapUnmanaged(Hash, translate_c.Result) = .{}, +diagnostics_collection: *DiagnosticsCollection, pub const Uri = []const u8; @@ -1517,6 +1519,10 @@ pub fn resolveCImport(self: *DocumentStore, handle: *Handle, node: Ast.Node.Inde } } + self.publishCimportDiagnostics(handle) catch |err| { + log.err("failed to publish cImport diagnostics: {}", .{err}); + }; + switch (result) { .success => |uri| { log.debug("Translated cImport into {s}", .{uri}); @@ -1526,6 +1532,65 @@ pub fn resolveCImport(self: *DocumentStore, handle: *Handle, node: Ast.Node.Inde } } +fn publishCimportDiagnostics(self: *DocumentStore, handle: *Handle) !void { + const file_path = URI.parse(self.allocator, handle.uri) catch |err| { + log.err("failed to parse URI '{s}': {}", .{ handle.uri, err }); + return; + }; + defer self.allocator.free(file_path); + + var wip: std.zig.ErrorBundle.Wip = undefined; + try wip.init(self.allocator); + defer wip.deinit(); + + const src_path = try wip.addString(file_path); + + for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| { + const result = blk: { + self.lock.lock(); + defer self.lock.unlock(); + break :blk self.cimports.get(hash) orelse continue; + }; + const error_bundle: std.zig.ErrorBundle = switch (result) { + .success => continue, + .failure => |bundle| bundle, + }; + + if (error_bundle.errorMessageCount() == 0) continue; + + const loc = offsets.nodeToLoc(handle.tree, node); + const source_loc = std.zig.findLineColumn(handle.tree.source, loc.start); + + comptime std.debug.assert(max_document_size <= std.math.maxInt(u32)); + + const src_loc = try wip.addSourceLocation(.{ + .src_path = src_path, + .line = @intCast(source_loc.line), + .column = @intCast(source_loc.column), + .span_start = @intCast(loc.start), + .span_main = @intCast(loc.start), + .span_end = @intCast(loc.end), + .source_line = try wip.addString(source_loc.source_line), + }); + + for 
(error_bundle.getMessages()) |err_msg_index| { + const err_msg = error_bundle.getErrorMessage(err_msg_index); + const msg = error_bundle.nullTerminatedString(err_msg.msg); + + try wip.addRootErrorMessage(.{ + .msg = try wip.addString(msg), + .src_loc = src_loc, + }); + } + } + + var error_bundle = try wip.toOwnedBundle(""); + defer error_bundle.deinit(self.allocator); + + try self.diagnostics_collection.pushErrorBundle(.cimport, handle.version, null, error_bundle); + try self.diagnostics_collection.publishDiagnostics(); +} + /// takes the string inside a @import() node (without the quotation marks) /// and returns it's uri /// caller owns the returned memory diff --git a/src/Server.zig b/src/Server.zig index 8f19733e2..b525bd007 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -45,6 +45,7 @@ config: Config = .{}, /// will default to lookup in the system and user configuration folder provided by known-folders. config_path: ?[]const u8 = null, document_store: DocumentStore, +/// Use `setTransport` to set the Transport. 
transport: ?lsp.AnyTransport = null, message_tracing: bool = false, offset_encoding: offsets.Encoding = .@"utf-16", @@ -426,6 +427,7 @@ fn initializeHandler(server: *Server, arena: std.mem.Allocator, request: types.I } else { server.offset_encoding = .@"utf-16"; } + server.diagnostics_collection.offset_encoding = server.offset_encoding; } if (request.capabilities.textDocument) |textDocument| { @@ -808,7 +810,6 @@ const Workspace = struct { const zig_exe_path = server.config.zig_exe_path orelse return; const zig_lib_path = server.config.zig_lib_path orelse return; const build_runner_path = server.config.build_runner_path orelse return; - const transport = server.transport orelse return; const workspace_path = @import("uri.zig").parse(server.allocator, workspace.uri) catch |err| { log.err("failed to parse URI '{s}': {}", .{ workspace.uri, err }); @@ -826,8 +827,6 @@ const Workspace = struct { .zig_lib_path = zig_lib_path, .build_runner_path = build_runner_path, .collection = &server.diagnostics_collection, - .lsp_transport = transport, - .offset_encoding = server.offset_encoding, }) catch |err| { workspace.build_on_save = null; log.err("failed to initilize Build-On-Save for '{s}': {}", .{ workspace.uri, err }); @@ -1869,6 +1868,7 @@ pub fn create(allocator: std.mem.Allocator) !*Server { .allocator = allocator, .config = DocumentStore.Config.fromMainConfig(Config{}), .thread_pool = if (zig_builtin.single_threaded) {} else undefined, // set below + .diagnostics_collection = &server.diagnostics_collection, }, .job_queue = std.fifo.LinearFifo(Job, .Dynamic).init(allocator), .thread_pool = undefined, // set below @@ -1909,6 +1909,11 @@ pub fn destroy(server: *Server) void { server.allocator.destroy(server); } +pub fn setTransport(server: *Server, transport: lsp.AnyTransport) void { + server.transport = transport; + server.diagnostics_collection.transport = transport; +} + pub fn keepRunning(server: Server) bool { switch (server.status) { .exiting_success, .exiting_failure 
=> return false, diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index 982a5164d..fa6d40751 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -25,8 +25,6 @@ pub fn generateDiagnostics( const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - const transport = server.transport orelse return; - { var arena_allocator = std.heap.ArenaAllocator.init(server.diagnostics_collection.allocator); errdefer arena_allocator.deinit(); @@ -61,18 +59,8 @@ pub fn generateDiagnostics( try server.diagnostics_collection.pushErrorBundle(.parse, handle.version, null, error_bundle); } - { - var arena_allocator = std.heap.ArenaAllocator.init(server.diagnostics_collection.allocator); - errdefer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - var diagnostics: std.ArrayListUnmanaged(types.Diagnostic) = .{}; - try collectCimportDiagnostics(&server.document_store, handle, arena, &diagnostics, server.offset_encoding); - try server.diagnostics_collection.pushLspDiagnostics(.cimport, handle.uri, arena_allocator.state, diagnostics.items); - } - std.debug.assert(server.client_capabilities.supports_publish_diagnostics); - server.diagnostics_collection.publishDiagnostics(transport, server.offset_encoding) catch |err| { + server.diagnostics_collection.publishDiagnostics() catch |err| { log.err("failed to publish diagnostics: {}", .{err}); }; } @@ -182,43 +170,6 @@ fn collectWarnStyleDiagnostics( } } -fn collectCimportDiagnostics( - document_store: *DocumentStore, - handle: *DocumentStore.Handle, - arena: std.mem.Allocator, - diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), - offset_encoding: offsets.Encoding, -) error{OutOfMemory}!void { - const tracy_zone = tracy.trace(@src()); - defer tracy_zone.end(); - - for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| { - const result = blk: { - document_store.lock.lock(); - defer document_store.lock.unlock(); - break :blk 
document_store.cimports.get(hash) orelse continue; - }; - const error_bundle: std.zig.ErrorBundle = switch (result) { - .success => continue, - .failure => |bundle| bundle, - }; - - if (error_bundle.errorMessageCount() == 0) continue; // `getMessages` can't be called on an empty ErrorBundle - try diagnostics.ensureUnusedCapacity(arena, error_bundle.errorMessageCount()); - for (error_bundle.getMessages()) |err_msg_index| { - const err_msg = error_bundle.getErrorMessage(err_msg_index); - - diagnostics.appendAssumeCapacity(.{ - .range = offsets.nodeToRange(handle.tree, node, offset_encoding), - .severity = .Error, - .code = .{ .string = "cImport" }, - .source = "zls", - .message = try arena.dupe(u8, error_bundle.nullTerminatedString(err_msg.msg)), - }); - } - } -} - fn collectGlobalVarDiagnostics( tree: Ast, arena: std.mem.Allocator, @@ -440,8 +391,6 @@ pub const BuildOnSave = struct { build_runner_path: []const u8, collection: *DiagnosticsCollection, - lsp_transport: lsp.AnyTransport, - offset_encoding: offsets.Encoding, }; pub fn init(self: *BuildOnSave, options: InitOptions) !void { @@ -493,8 +442,6 @@ pub const BuildOnSave = struct { self.thread = try std.Thread.spawn(.{ .allocator = options.allocator }, loop, .{ self, options.collection, - options.lsp_transport, - options.offset_encoding, duped_workspace_path, }); } @@ -539,8 +486,6 @@ pub const BuildOnSave = struct { fn loop( self: *BuildOnSave, collection: *DiagnosticsCollection, - lsp_transport: lsp.AnyTransport, - offset_encoding: offsets.Encoding, workspace_path: []const u8, ) void { defer self.allocator.free(workspace_path); @@ -567,8 +512,6 @@ pub const BuildOnSave = struct { self, &transport, collection, - lsp_transport, - offset_encoding, workspace_path, ) catch |err| { log.err("failed to handle error bundle message from build runner: {}", .{err}); @@ -586,8 +529,6 @@ pub const BuildOnSave = struct { self: *BuildOnSave, transport: *Transport, collection: *DiagnosticsCollection, - lsp_transport: 
lsp.AnyTransport, - offset_encoding: offsets.Encoding, workspace_path: []const u8, ) !void { const header = try transport.reader().readStructEndian(ServerToClient.ErrorBundle, .little); @@ -607,6 +548,6 @@ pub const BuildOnSave = struct { const diagnostic_tag: DiagnosticsCollection.Tag = @enumFromInt(@as(u32, @truncate(hasher.final()))); try collection.pushErrorBundle(diagnostic_tag, header.cycle, workspace_path, error_bundle); - try collection.publishDiagnostics(lsp_transport, offset_encoding); + try collection.publishDiagnostics(); } }; diff --git a/src/main.zig b/src/main.zig index 3af665c58..df1c57f76 100644 --- a/src/main.zig +++ b/src/main.zig @@ -348,7 +348,7 @@ pub fn main() !u8 { const server = try zls.Server.create(allocator); defer server.destroy(); - server.transport = transport.any(); + server.setTransport(transport.any()); server.config_path = result.config_path; server.message_tracing = result.enable_message_tracing; diff --git a/src/translate_c.zig b/src/translate_c.zig index bc26ea7f9..0e1005d76 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -182,7 +182,7 @@ pub fn translate( var process = std.process.Child.init(argv.items, allocator); process.stdin_behavior = .Pipe; process.stdout_behavior = .Pipe; - process.stderr_behavior = .Pipe; + process.stderr_behavior = .Ignore; errdefer |err| if (!zig_builtin.is_test) reportTranslateError(allocator, process.stderr, argv.items, @errorName(err)); From bc2778d0b5d1b3520414e47acc0ff6d2c9aef926 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Sun, 1 Dec 2024 21:18:39 +0100 Subject: [PATCH 17/31] handle missing source line in error bundle source locations --- src/DiagnosticsCollection.zig | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/DiagnosticsCollection.zig b/src/DiagnosticsCollection.zig index 1e9584b5e..b46996c9e 100644 --- a/src/DiagnosticsCollection.zig +++ b/src/DiagnosticsCollection.zig @@ -279,12 +279,22 @@ fn errorBundleSourceLocationToRange( 
src_loc: std.zig.ErrorBundle.SourceLocation, offset_encoding: offsets.Encoding, ) lsp.types.Range { - const source_line = error_bundle.nullTerminatedString(src_loc.source_line); - + // We assume that the span is inside of the source line const source_line_range_utf8: lsp.types.Range = .{ .start = .{ .line = 0, .character = src_loc.column - (src_loc.span_main - src_loc.span_start) }, .end = .{ .line = 0, .character = src_loc.column + (src_loc.span_end - src_loc.span_main) }, }; + + if (src_loc.source_line == 0) { + // Without the source line it is not possible to figure out the precise character value + // The result will be incorrect if the line contains non-ascii characters + return .{ + .start = .{ .line = src_loc.line, .character = source_line_range_utf8.start.character }, + .end = .{ .line = src_loc.line, .character = source_line_range_utf8.end.character }, + }; + } + + const source_line = error_bundle.nullTerminatedString(src_loc.source_line); const source_line_range = offsets.convertRangeEncoding(source_line, source_line_range_utf8, .@"utf-8", offset_encoding); return .{ From 76da7a2f60532c865ea35f512419f2fabf4cab40 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Sun, 1 Dec 2024 22:35:58 +0100 Subject: [PATCH 18/31] more DiagnosticsCollection test coverage --- src/DiagnosticsCollection.zig | 79 +++++++++++++++++++++++++++++++---- 1 file changed, 71 insertions(+), 8 deletions(-) diff --git a/src/DiagnosticsCollection.zig b/src/DiagnosticsCollection.zig index b46996c9e..d53723626 100644 --- a/src/DiagnosticsCollection.zig +++ b/src/DiagnosticsCollection.zig @@ -303,6 +303,49 @@ fn errorBundleSourceLocationToRange( }; } +test errorBundleSourceLocationToRange { + var eb = try createTestingErrorBundle(&.{ + .{ + .message = "First Error", + .source_location = .{ + .src_path = "", + .line = 2, + .column = 6, + .span_start = 14, + .span_main = 14, + .span_end = 17, + .source_line = "const foo = 5", + }, + }, + .{ + .message = "Second Error", + .source_location = .{ + 
.src_path = "", + .line = 1, + .column = 4, + .span_start = 20, + .span_main = 23, + .span_end = 25, + .source_line = null, + }, + }, + }); + defer eb.deinit(std.testing.allocator); + + const src_loc0 = eb.getSourceLocation(eb.getErrorMessage(eb.getMessages()[0]).src_loc); + const src_loc1 = eb.getSourceLocation(eb.getErrorMessage(eb.getMessages()[1]).src_loc); + + try std.testing.expectEqual(lsp.types.Range{ + .start = .{ .line = 2, .character = 6 }, + .end = .{ .line = 2, .character = 9 }, + }, errorBundleSourceLocationToRange(eb, src_loc0, .@"utf-8")); + + try std.testing.expectEqual(lsp.types.Range{ + .start = .{ .line = 1, .character = 1 }, + .end = .{ .line = 1, .character = 6 }, + }, errorBundleSourceLocationToRange(eb, src_loc1, .@"utf-8")); +} + test { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); @@ -321,13 +364,16 @@ test { var eb3 = try createTestingErrorBundle(&.{.{ .message = "As" }}); defer eb3.deinit(std.testing.allocator); + const uri = try URI.fromPath(std.testing.allocator, testing_src_path); + defer std.testing.allocator.free(uri); + { try collection.pushErrorBundle(.parse, 1, null, eb1); try std.testing.expectEqual(1, collection.outdated_files.count()); - try std.testing.expectEqualStrings("file:///sample.zig", collection.outdated_files.keys()[0]); + try std.testing.expectEqualStrings(uri, collection.outdated_files.keys()[0]); var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; - try collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + try collection.collectLspDiagnosticsForDocument(uri, .@"utf-8", arena, &diagnostics); try std.testing.expectEqual(1, diagnostics.items.len); try std.testing.expectEqual(lsp.types.DiagnosticSeverity.Error, diagnostics.items[0].severity); @@ -339,7 +385,7 @@ test { try collection.pushErrorBundle(.parse, 0, null, eb2); var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; - try 
collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + try collection.collectLspDiagnosticsForDocument(uri, .@"utf-8", arena, &diagnostics); try std.testing.expectEqual(1, diagnostics.items.len); try std.testing.expectEqualStrings("Living For The City", diagnostics.items[0].message); @@ -349,7 +395,7 @@ test { try collection.pushErrorBundle(.parse, 2, null, eb2); var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; - try collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + try collection.collectLspDiagnosticsForDocument(uri, .@"utf-8", arena, &diagnostics); try std.testing.expectEqual(1, diagnostics.items.len); try std.testing.expectEqualStrings("You Haven't Done Nothin'", diagnostics.items[0].message); @@ -359,12 +405,29 @@ test { try collection.pushErrorBundle(.parse, 3, null, .empty); var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; - try collection.collectLspDiagnosticsForDocument("file:///sample.zig", .@"utf-8", arena, &diagnostics); + try collection.collectLspDiagnosticsForDocument(uri, .@"utf-8", arena, &diagnostics); try std.testing.expectEqual(0, diagnostics.items.len); } + + { + try collection.pushErrorBundle(@enumFromInt(16), 4, null, eb2); + try collection.pushErrorBundle(@enumFromInt(17), 4, null, eb3); + + var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; + try collection.collectLspDiagnosticsForDocument(uri, .@"utf-8", arena, &diagnostics); + + try std.testing.expectEqual(2, diagnostics.items.len); + try std.testing.expectEqualStrings("You Haven't Done Nothin'", diagnostics.items[0].message); + try std.testing.expectEqualStrings("As", diagnostics.items[1].message); + } } +const testing_src_path = switch (@import("builtin").os.tag) { + .windows => "C:\\sample.zig", + else => "/sample.zig", +}; + fn createTestingErrorBundle(messages: []const struct { message: []const u8, count: u32 = 1, @@ -375,8 +438,8 @@ 
fn createTestingErrorBundle(messages: []const struct { span_start: u32, span_main: u32, span_end: u32, - source_line: []const u8, - } = .{ .src_path = "/sample.zig", .line = 0, .column = 0, .span_start = 0, .span_main = 0, .span_end = 0, .source_line = "" }, + source_line: ?[]const u8, + } = .{ .src_path = testing_src_path, .line = 0, .column = 0, .span_start = 0, .span_main = 0, .span_end = 0, .source_line = "" }, }) error{OutOfMemory}!std.zig.ErrorBundle { var eb: std.zig.ErrorBundle.Wip = undefined; try eb.init(std.testing.allocator); @@ -393,7 +456,7 @@ fn createTestingErrorBundle(messages: []const struct { .span_start = msg.source_location.span_start, .span_main = msg.source_location.span_main, .span_end = msg.source_location.span_end, - .source_line = try eb.addString(msg.source_location.source_line), + .source_line = if (msg.source_location.source_line) |source_line| try eb.addString(source_line) else 0, }), }); } From 2b862db316796e4065919792029ea710af0f1ca2 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 3 Dec 2024 15:58:59 +0100 Subject: [PATCH 19/31] update some configuration log message checks --- src/Server.zig | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index b525bd007..7f586e844 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -943,7 +943,7 @@ pub fn updateConfiguration( // apply changes // <----------------------------------------------------------> - var has_changed: [std.meta.fields(Config).len]bool = .{false} ** std.meta.fields(Config).len; + var has_changed: [std.meta.fields(Config).len]bool = @splat(false); inline for (std.meta.fields(Config), 0..) 
|field, field_index| { if (@field(new_config, field.name)) |new_value| { @@ -1052,16 +1052,16 @@ pub fn updateConfiguration( switch (resolve_result.build_runner_version) { .resolved, .unresolved_dont_error => {}, - .unresolved => { + .unresolved => blk: { + if (!options.resolve) break :blk; + const zig_version = resolve_result.zig_runtime_version.?; const zls_version = build_options.version; const zig_version_is_tagged = zig_version.pre == null and zig_version.build == null; const zls_version_is_tagged = zls_version.pre == null and zls_version.build == null; - if (zig_builtin.is_test) { - // This has test coverage in `src/build_runner/BuildRunnerVersion.zig` - } else if (zig_version_is_tagged) { + if (zig_version_is_tagged) { server.showMessage( .Warning, "Zig {} should be used with ZLS {}.{}.* but ZLS {} is being used.", @@ -1092,13 +1092,14 @@ pub fn updateConfiguration( } if (server.config.enable_build_on_save orelse false) { - if (!std.process.can_spawn) { - log.info("'enable_build_on_save' is ignored because your OS can't spawn a child process", .{}); + if (!Workspace.build_on_save_supported) { + // This message is not very helpful but it relatively uncommon to happen anyway. 
+ log.info("'enable_build_on_save' is ignored because build on save is not supported by this ZLS build", .{}); } else if (server.status == .initialized and (server.config.zig_exe_path == null or server.config.zig_lib_path == null)) { log.warn("'enable_build_on_save' is ignored because Zig could not be found", .{}); } else if (!server.client_capabilities.supports_publish_diagnostics) { log.warn("'enable_build_on_save' is ignored because it is not supported by {s}", .{server.client_capabilities.client_name orelse "your editor"}); - } else if (server.status == .initialized and resolve_result.build_runner_version != .resolved) { + } else if (server.status == .initialized and server.config.build_runner_path == null and options.resolve and resolve_result.build_runner_version != .resolved) { log.warn("'enable_build_on_save' is ignored because no build runner is available", .{}); } } From a490392b0f3f1152ea78295c737fb27e1928414f Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 3 Dec 2024 16:13:04 +0100 Subject: [PATCH 20/31] fix a bug where build on save wouldn't start if there is new workspace config --- src/Server.zig | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index 7f586e844..ec6bfd6f1 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -653,12 +653,7 @@ fn initializedHandler(server: *Server, _: std.mem.Allocator, notification: types if (server.client_capabilities.supports_configuration) { try server.requestConfiguration(); - } else { - if (Workspace.build_on_save_supported) { - for (server.workspaces.items) |*workspace| { - try workspace.startOrRestartBuildOnSave(server); - } - } + // TODO if the `workspace/configuration` request fails to be handled, build on save will not be started } if (std.crypto.random.intRangeLessThan(usize, 0, 32768) == 0) { @@ -796,15 +791,20 @@ const Workspace = struct { allocator.free(workspace.uri); } - fn startOrRestartBuildOnSave(workspace: 
*Workspace, server: *Server) error{OutOfMemory}!void { + fn refreshBuildOnSave(workspace: *Workspace, server: *Server, options: struct { + /// Whether the build on save process should be restated if it is already running. + restart: bool, + }) error{OutOfMemory}!void { comptime std.debug.assert(build_on_save_supported); + const enable_build_on_save = server.config.enable_build_on_save orelse true; + if (workspace.build_on_save) |*build_on_save| { + if (enable_build_on_save and !options.restart) return; build_on_save.deinit(); workspace.build_on_save = null; } - const enable_build_on_save = server.config.enable_build_on_save orelse true; if (!enable_build_on_save) return; const zig_exe_path = server.config.zig_exe_path orelse return; @@ -1004,15 +1004,22 @@ pub fn updateConfiguration( } if (Workspace.build_on_save_supported and - server.status == .initialized and - (new_zig_exe_path or - new_zig_lib_path or - new_build_runner_path or - new_enable_build_on_save or - new_build_on_save_args)) + // If the client supports the `workspace/configuration` request, defer + // build on save initialization until after we have received workspace + // configuration from the server + (!server.client_capabilities.supports_configuration or server.status == .initialized)) { + const should_restart = + new_zig_exe_path or + new_zig_lib_path or + new_build_runner_path or + new_enable_build_on_save or + new_build_on_save_args; + for (server.workspaces.items) |*workspace| { - try workspace.startOrRestartBuildOnSave(server); + try workspace.refreshBuildOnSave(server, .{ + .restart = should_restart, + }); } } From 1b23c832eb4f446126f1d5473d3c5fc46ed10493 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 3 Dec 2024 18:39:15 +0100 Subject: [PATCH 21/31] set default offset encoding for DiagnosticsCollection.zig --- src/DiagnosticsCollection.zig | 5 ++--- src/Server.zig | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/DiagnosticsCollection.zig 
b/src/DiagnosticsCollection.zig index d53723626..426b5ef75 100644 --- a/src/DiagnosticsCollection.zig +++ b/src/DiagnosticsCollection.zig @@ -16,7 +16,7 @@ tag_set: std.AutoArrayHashMapUnmanaged(Tag, struct { }) = .{}, outdated_files: std.StringArrayHashMapUnmanaged(void) = .{}, transport: ?lsp.AnyTransport = null, -offset_encoding: ?offsets.Encoding = null, +offset_encoding: offsets.Encoding = .@"utf-16", const DiagnosticsCollection = @This(); @@ -176,7 +176,6 @@ fn pathToUri(allocator: std.mem.Allocator, base_path: ?[]const u8, src_path: []c pub fn publishDiagnostics(collection: *DiagnosticsCollection) (std.mem.Allocator.Error || lsp.AnyTransport.WriteError)!void { const transport = collection.transport orelse return; - const offset_encoding = collection.offset_encoding orelse return; var arena_allocator = std.heap.ArenaAllocator.init(collection.allocator); defer arena_allocator.deinit(); @@ -193,7 +192,7 @@ pub fn publishDiagnostics(collection: *DiagnosticsCollection) (std.mem.Allocator _ = arena_allocator.reset(.retain_capacity); var diagnostics: std.ArrayListUnmanaged(lsp.types.Diagnostic) = .{}; - try collection.collectLspDiagnosticsForDocument(document_uri, offset_encoding, arena_allocator.allocator(), &diagnostics); + try collection.collectLspDiagnosticsForDocument(document_uri, collection.offset_encoding, arena_allocator.allocator(), &diagnostics); const params: lsp.types.PublishDiagnosticsParams = .{ .uri = document_uri, diff --git a/src/Server.zig b/src/Server.zig index ec6bfd6f1..b8c69cf2b 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -427,8 +427,8 @@ fn initializeHandler(server: *Server, arena: std.mem.Allocator, request: types.I } else { server.offset_encoding = .@"utf-16"; } - server.diagnostics_collection.offset_encoding = server.offset_encoding; } + server.diagnostics_collection.offset_encoding = server.offset_encoding; if (request.capabilities.textDocument) |textDocument| { server.client_capabilities.supports_publish_diagnostics = 
textDocument.publishDiagnostics != null; From adeb6185df94e4fb5a8ac53f96d2424ab3706574 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 5 Dec 2024 16:28:49 +0100 Subject: [PATCH 22/31] Add back the std.build.Watch check for Linux pre 5.17 3d6eb6c7c09ca5d7c3e6462805e340717ca5d0cb --- src/build_runner/master.zig | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index 7510be792..c6f7b4071 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -390,6 +390,15 @@ pub fn main() !void { suicide_thread.detach(); if (!Watch.have_impl) return; + + if (builtin.os.tag == .linux) blk: { + // std.build.Watch requires `FAN_REPORT_TARGET_FID` which is Linux 5.17+ + const utsname = std.posix.uname(); + const version = std.SemanticVersion.parse(&utsname.release) catch break :blk; + if (version.order(.{ .major = 5, .minor = 17, .patch = 0 }) != .lt) break :blk; + return; + } + var w = try Watch.init(); const gpa = arena; From e7de3c5e0330e04f8c354a67891b90bddd065d52 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 5 Dec 2024 16:55:36 +0100 Subject: [PATCH 23/31] send step id in build runner to differentiate error between steps --- src/build_runner/master.zig | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index c6f7b4071..6cadd4c7b 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -425,7 +425,7 @@ pub fn main() !void { rebuild: while (true) : (run.cycle += 1) { runSteps( builder, - step_stack.keys(), + &step_stack, main_progress_node, &run, ) catch |err| switch (err) { @@ -559,11 +559,12 @@ fn prepare( fn runSteps( b: *std.Build, - steps: []const *Step, + steps_stack: *const std.AutoArrayHashMapUnmanaged(*Step, void), parent_prog_node: std.Progress.Node, run: *Run, ) error{ OutOfMemory, UncleanExit }!void { const thread_pool = &run.thread_pool; + const steps = 
steps_stack.keys(); var step_prog = parent_prog_node.start("steps", steps.len); defer step_prog.end(); @@ -579,17 +580,17 @@ fn runSteps( wait_group.start(); thread_pool.spawn(workerMakeOneStep, .{ - &wait_group, b, step, step_prog, run, + &wait_group, b, steps_stack, step, step_prog, run, }) catch @panic("OOM"); } if (run.transport) |transport| { for (steps) |step| { + const step_id: u32 = @intCast(steps_stack.getIndex(step).?); // missing fields: // - result_error_msgs // - result_stderr - // TODO step_id - serveWatchErrorBundle(transport, 0, run.cycle, step.result_error_bundle) catch @panic("failed to send watch errors"); + serveWatchErrorBundle(transport, step_id, run.cycle, step.result_error_bundle) catch @panic("failed to send watch errors"); } } } @@ -647,6 +648,7 @@ fn constructGraphAndCheckForDependencyLoop( fn workerMakeOneStep( wg: *std.Thread.WaitGroup, b: *std.Build, + steps_stack: *const std.AutoArrayHashMapUnmanaged(*Step, void), s: *Step, prog_node: std.Progress.Node, run: *Run, @@ -711,11 +713,11 @@ fn workerMakeOneStep( }); if (run.transport) |transport| { + const step_id: u32 = @intCast(steps_stack.getIndex(s).?); // missing fields: // - result_error_msgs // - result_stderr - // TODO step_id - serveWatchErrorBundle(transport, 0, run.cycle, s.result_error_bundle) catch @panic("failed to send watch errors"); + serveWatchErrorBundle(transport, step_id, run.cycle, s.result_error_bundle) catch @panic("failed to send watch errors"); } handle_result: { @@ -733,7 +735,7 @@ fn workerMakeOneStep( for (s.dependants.items) |dep| { wg.start(); thread_pool.spawn(workerMakeOneStep, .{ - wg, b, dep, prog_node, run, + wg, b, steps_stack, dep, prog_node, run, }) catch @panic("OOM"); } } @@ -759,7 +761,7 @@ fn workerMakeOneStep( wg.start(); thread_pool.spawn(workerMakeOneStep, .{ - wg, b, dep, prog_node, run, + wg, b, steps_stack, dep, prog_node, run, }) catch @panic("OOM"); } else { run.memory_blocked_steps.items[i] = dep; @@ -1040,7 +1042,7 @@ fn 
extractBuildInformation( // run all steps that are dependencies try runSteps( b, - step_dependencies.keys(), + &step_dependencies, main_progress_node, run, ); From 48125499c688c501ab6a1d9382c479c63f268ee6 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 5 Dec 2024 17:11:07 +0100 Subject: [PATCH 24/31] report stderr when build on save runner exits early --- src/features/diagnostics.zig | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index fa6d40751..33112aba6 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -499,7 +499,15 @@ pub const BuildOnSave = struct { while (true) { const header = transport.receiveMessage(null) catch |err| switch (err) { - error.EndOfStream => return, + error.EndOfStream => { + const stderr = self.child_process.stderr.?.readToEndAlloc(self.allocator, 16 * 1024 * 1024) catch ""; + defer self.allocator.free(stderr); + + if (stderr.len != 0) { + log.err("zig build runner exited with stderr:\n{s}", .{stderr}); + } + return; + }, else => { log.err("failed to receive message from build runner: {}", .{err}); return; From 2fd15020d2b1837e74b213c2978b869b89f1c870 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Fri, 6 Dec 2024 21:46:11 +0100 Subject: [PATCH 25/31] remove invalid free --- src/DiagnosticsCollection.zig | 1 - 1 file changed, 1 deletion(-) diff --git a/src/DiagnosticsCollection.zig b/src/DiagnosticsCollection.zig index 426b5ef75..63affe3db 100644 --- a/src/DiagnosticsCollection.zig +++ b/src/DiagnosticsCollection.zig @@ -40,7 +40,6 @@ pub fn deinit(collection: *DiagnosticsCollection) void { for (entry.diagnostics_set.keys(), entry.diagnostics_set.values()) |uri, lsp_diagnostic| { collection.allocator.free(uri); lsp_diagnostic.arena.promote(collection.allocator).deinit(); - collection.allocator.free(lsp_diagnostic.diagnostics); } entry.diagnostics_set.deinit(collection.allocator); } From 
3ba30bf057e65ba8f92beb60cd22040c22cb3d91 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 10 Dec 2024 00:12:34 +0100 Subject: [PATCH 26/31] implement runtime zig version detection for build on save The main motivation for this is to keep support for the latest mach nominated zig version which is `0.14.0-dev.1911+3bf89f55c`. --- src/Server.zig | 66 +++++++++++++++++++++---------------- src/build_runner/master.zig | 11 ++----- src/build_runner/shared.zig | 43 ++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 37 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index b8c69cf2b..c4f99c4b9 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -20,6 +20,7 @@ const diff = @import("diff.zig"); const InternPool = @import("analyser/analyser.zig").InternPool; const DiagnosticsCollection = @import("DiagnosticsCollection.zig"); const known_folders = @import("known-folders"); +const build_runner_shared = @import("build_runner/shared.zig"); const BuildRunnerVersion = @import("build_runner/BuildRunnerVersion.zig").BuildRunnerVersion; const signature_help = @import("features/signature_help.zig"); @@ -767,12 +768,7 @@ fn handleConfiguration(server: *Server, json: std.json.Value) error{OutOfMemory} const Workspace = struct { uri: types.URI, - build_on_save: if (build_on_save_supported) ?diagnostics_gen.BuildOnSave else void, - - pub const build_on_save_supported = - std.process.can_spawn and - !zig_builtin.single_threaded and - std.Build.Watch.have_impl; + build_on_save: if (build_runner_shared.isBuildOnSaveSupportedComptime()) ?diagnostics_gen.BuildOnSave else void, fn init(server: *Server, uri: types.URI) error{OutOfMemory}!Workspace { const duped_uri = try server.allocator.dupe(u8, uri); @@ -780,53 +776,59 @@ const Workspace = struct { return .{ .uri = duped_uri, - .build_on_save = if (build_on_save_supported) null else {}, + .build_on_save = if (build_runner_shared.isBuildOnSaveSupportedComptime()) null else {}, }; } fn deinit(workspace: *Workspace, allocator: 
std.mem.Allocator) void { - if (build_on_save_supported) { + if (build_runner_shared.isBuildOnSaveSupportedComptime()) { if (workspace.build_on_save) |*build_on_save| build_on_save.deinit(); } allocator.free(workspace.uri); } - fn refreshBuildOnSave(workspace: *Workspace, server: *Server, options: struct { - /// Whether the build on save process should be restated if it is already running. + fn refreshBuildOnSave(workspace: *Workspace, args: struct { + server: *Server, + /// If null, build on save will be disabled + runtime_zig_version: ?std.SemanticVersion, + /// Whether the build on save process should be restarted if it is already running. restart: bool, }) error{OutOfMemory}!void { - comptime std.debug.assert(build_on_save_supported); + comptime std.debug.assert(build_runner_shared.isBuildOnSaveSupportedComptime()); + comptime std.debug.assert(build_options.version.order(.{ .major = 0, .minor = 14, .patch = 0 }) == .lt); // Update `isBuildOnSaveSupportedRuntime` and build runner - const enable_build_on_save = server.config.enable_build_on_save orelse true; + const build_on_save_supported = if (args.runtime_zig_version) |version| build_runner_shared.isBuildOnSaveSupportedRuntime(version) else false; + const build_on_save_wanted = args.server.config.enable_build_on_save orelse true; + const enable = build_on_save_supported and build_on_save_wanted; if (workspace.build_on_save) |*build_on_save| { - if (enable_build_on_save and !options.restart) return; + if (enable and !args.restart) return; build_on_save.deinit(); workspace.build_on_save = null; } - if (!enable_build_on_save) return; + if (!enable) return; - const zig_exe_path = server.config.zig_exe_path orelse return; - const zig_lib_path = server.config.zig_lib_path orelse return; - const build_runner_path = server.config.build_runner_path orelse return; + const zig_exe_path = args.server.config.zig_exe_path orelse return; + const zig_lib_path = args.server.config.zig_lib_path orelse return; + const 
build_runner_path = args.server.config.build_runner_path orelse return; - const workspace_path = @import("uri.zig").parse(server.allocator, workspace.uri) catch |err| { + const workspace_path = @import("uri.zig").parse(args.server.allocator, workspace.uri) catch |err| { log.err("failed to parse URI '{s}': {}", .{ workspace.uri, err }); return; }; - defer server.allocator.free(workspace_path); + defer args.server.allocator.free(workspace_path); workspace.build_on_save = @as(diagnostics_gen.BuildOnSave, undefined); workspace.build_on_save.?.init(.{ - .allocator = server.allocator, + .allocator = args.server.allocator, .workspace_path = workspace_path, - .build_on_save_args = server.config.build_on_save_args, - .check_step_only = server.config.enable_build_on_save == null, + .build_on_save_args = args.server.config.build_on_save_args, + .check_step_only = args.server.config.enable_build_on_save == null, .zig_exe_path = zig_exe_path, .zig_lib_path = zig_lib_path, .build_runner_path = build_runner_path, - .collection = &server.diagnostics_collection, + .collection = &args.server.diagnostics_collection, }) catch |err| { workspace.build_on_save = null; log.err("failed to initilize Build-On-Save for '{s}': {}", .{ workspace.uri, err }); @@ -1003,7 +1005,8 @@ pub fn updateConfiguration( } } - if (Workspace.build_on_save_supported and + if (build_runner_shared.isBuildOnSaveSupportedComptime() and + options.resolve and // If the client supports the `workspace/configuration` request, defer // build on save initialization until after we have received workspace // configuration from the server @@ -1017,7 +1020,9 @@ pub fn updateConfiguration( new_build_on_save_args; for (server.workspaces.items) |*workspace| { - try workspace.refreshBuildOnSave(server, .{ + try workspace.refreshBuildOnSave(.{ + .server = server, + .runtime_zig_version = resolve_result.zig_runtime_version, .restart = should_restart, }); } @@ -1099,15 +1104,18 @@ pub fn updateConfiguration( } if 
(server.config.enable_build_on_save orelse false) { - if (!Workspace.build_on_save_supported) { + if (!build_runner_shared.isBuildOnSaveSupportedComptime()) { // This message is not very helpful but it relatively uncommon to happen anyway. log.info("'enable_build_on_save' is ignored because build on save is not supported by this ZLS build", .{}); } else if (server.status == .initialized and (server.config.zig_exe_path == null or server.config.zig_lib_path == null)) { log.warn("'enable_build_on_save' is ignored because Zig could not be found", .{}); } else if (!server.client_capabilities.supports_publish_diagnostics) { log.warn("'enable_build_on_save' is ignored because it is not supported by {s}", .{server.client_capabilities.client_name orelse "your editor"}); - } else if (server.status == .initialized and server.config.build_runner_path == null and options.resolve and resolve_result.build_runner_version != .resolved) { + } else if (server.status == .initialized and options.resolve and resolve_result.build_runner_version == .unresolved and server.config.build_runner_path == null) { log.warn("'enable_build_on_save' is ignored because no build runner is available", .{}); + } else if (server.status == .initialized and options.resolve and resolve_result.zig_runtime_version != null and !build_runner_shared.isBuildOnSaveSupportedRuntime(resolve_result.zig_runtime_version.?)) { + // There is one edge-case where build on save is not supported because of Linux pre 5.17 + log.warn("'enable_build_on_save' is not supported by Zig {}", .{resolve_result.zig_runtime_version.?}); } } @@ -1236,8 +1244,10 @@ const ResolveConfigurationResult = struct { zig_env: ?std.json.Parsed(configuration.Env), zig_runtime_version: ?std.SemanticVersion, build_runner_version: union(enum) { - /// no suitable build runner could be resolved based on the `zig_runtime_version` + /// If returned, guarantees `zig_runtime_version != null`. 
resolved: BuildRunnerVersion, + /// no suitable build runner could be resolved based on the `zig_runtime_version` + /// If returned, guarantees `zig_runtime_version != null`. unresolved, unresolved_dont_error, }, diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig index 6cadd4c7b..d0f7b3407 100644 --- a/src/build_runner/master.zig +++ b/src/build_runner/master.zig @@ -389,15 +389,8 @@ pub fn main() !void { }.do, .{}); suicide_thread.detach(); - if (!Watch.have_impl) return; - - if (builtin.os.tag == .linux) blk: { - // std.build.Watch requires `FAN_REPORT_TARGET_FID` which is Linux 5.17+ - const utsname = std.posix.uname(); - const version = std.SemanticVersion.parse(&utsname.release) catch break :blk; - if (version.order(.{ .major = 5, .minor = 17, .patch = 0 }) != .lt) break :blk; - return; - } + if (!shared.isBuildOnSaveSupportedComptime()) return; + if (!shared.isBuildOnSaveSupportedRuntime(builtin.zig_version)) return; var w = try Watch.init(); diff --git a/src/build_runner/shared.zig b/src/build_runner/shared.zig index 18b9d64e4..3d5872659 100644 --- a/src/build_runner/shared.zig +++ b/src/build_runner/shared.zig @@ -144,3 +144,46 @@ pub const ServerToClient = struct { string_bytes_len: u32, }; }; + +const windows_support_version = std.SemanticVersion.parse("0.14.0-dev.625+2de0e2eca") catch unreachable; +const kqueue_support_version = std.SemanticVersion.parse("0.14.0-dev.2046+b8795b4d0") catch unreachable; +/// The Zig version which added `std.Build.Watch.have_impl` +const have_impl_flag_version = kqueue_support_version; + +/// Returns true if is comptime known that build on save is supported. 
+pub inline fn isBuildOnSaveSupportedComptime() bool { + if (!std.process.can_spawn) return false; + if (builtin.single_threaded) return false; + return true; +} + +pub fn isBuildOnSaveSupportedRuntime(runtime_zig_version: std.SemanticVersion) bool { + if (!isBuildOnSaveSupportedComptime()) return false; + + if (builtin.os.tag == .linux) blk: { + // std.build.Watch requires `FAN_REPORT_TARGET_FID` which is Linux 5.17+ + const utsname = std.posix.uname(); + const version = std.SemanticVersion.parse(&utsname.release) catch break :blk; + if (version.order(.{ .major = 5, .minor = 17, .patch = 0 }) != .lt) break :blk; + return false; + } + + // This code path is present to support runtime Zig version before `0.14.0-dev.2046+b8795b4d0`. + // The main motivation is to keep support for the latest mach nominated zig version which is `0.14.0-dev.1911+3bf89f55c`. + return switch (builtin.os.tag) { + .linux => true, + .windows => runtime_zig_version.order(windows_support_version) != .lt, + .dragonfly, + .freebsd, + .netbsd, + .openbsd, + .ios, + .macos, + .tvos, + .visionos, + .watchos, + .haiku, + => runtime_zig_version.order(kqueue_support_version) != .lt, + else => false, + }; +} From 0927d16c3ee0c4fc25d30ec702e8c4acce8fa1e6 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Tue, 10 Dec 2024 00:12:15 +0100 Subject: [PATCH 27/31] set minimum runtime version to `0.14.0-dev.310+9d38e82b5` --- build.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.zig b/build.zig index 108a8381b..9b24fcae7 100644 --- a/build.zig +++ b/build.zig @@ -18,14 +18,14 @@ const zls_version = std.SemanticVersion{ .major = 0, .minor = 14, .patch = 0, .p const minimum_build_zig_version = "0.14.0-dev.2472+cc82620b2"; /// Specify the minimum Zig version that is required to run ZLS: -/// Build Runner: Implement File System Watching for kqueue +/// make zig compiler processes live across rebuilds /// /// Examples of reasons that would cause the minimum runtime version to be bumped 
are: /// - breaking change to the Zig Syntax /// - breaking change to AstGen (i.e `zig ast-check`) /// /// A breaking change to the Zig Build System should be handled by updating ZLS's build runner (see src\build_runner) -const minimum_runtime_zig_version = "0.14.0-dev.2046+b8795b4d0"; +const minimum_runtime_zig_version = "0.14.0-dev.310+9d38e82b5"; const release_targets = [_]std.Target.Query{ .{ .cpu_arch = .x86_64, .os_tag = .windows }, From 86c0d4d2ebf85ffc165f7cd677efe9bcd2cb2cf9 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 11 Dec 2024 17:34:15 +0100 Subject: [PATCH 28/31] CI: check build runner against mach-latest --- .github/workflows/build_runner.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build_runner.yml b/.github/workflows/build_runner.yml index e6c7db434..8c1f384df 100644 --- a/.github/workflows/build_runner.yml +++ b/.github/workflows/build_runner.yml @@ -19,6 +19,8 @@ jobs: fail-fast: false matrix: include: + - zig-version: mach-latest + build-runner-file: master.zig - zig-version: master build-runner-file: master.zig runs-on: ubuntu-latest From e390f5c7ea36c8f0010bc888d338bc79533ca0da Mon Sep 17 00:00:00 2001 From: Techatrix Date: Sun, 15 Dec 2024 20:24:14 +0100 Subject: [PATCH 29/31] cleanup build runner process error handling --- src/features/diagnostics.zig | 93 ++++++++++++++++++++++-------------- 1 file changed, 56 insertions(+), 37 deletions(-) diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index 33112aba6..dcc0aa9a4 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -426,9 +426,14 @@ pub const BuildOnSave = struct { return; }; - errdefer _ = child_process.kill() catch |err| { - std.debug.panic("failed to terminate build runner process, error: {}", .{err}); - }; + errdefer { + _ = terminateChildProcessReportError( + &child_process, + options.allocator, + "zig build runner", + .kill, + ); + } self.* = .{ .allocator = options.allocator, @@ -453,31 +458,13 
@@ pub const BuildOnSave = struct { return; }; - const stderr_file = self.child_process.stderr.?; - const stderr = stderr_file.readToEndAlloc(self.allocator, 16 * 1024 * 1024) catch ""; - defer self.allocator.free(stderr); - - const term = self.child_process.wait() catch |err| { - log.warn("Failed to await zig build runner: {}", .{err}); - return; - }; - - switch (term) { - .Exited => |code| if (code != 0) { - if (stderr.len != 0) { - log.warn("zig build runner exited with non-zero status: {}\nstderr:\n{s}", .{ code, stderr }); - } else { - log.warn("zig build runner exited with non-zero status: {}", .{code}); - } - }, - else => { - if (stderr.len != 0) { - log.warn("zig build runner exitied abnormally: {s}\nstderr:\n{s}", .{ @tagName(term), stderr }); - } else { - log.warn("zig build runner exitied abnormally: {s}", .{@tagName(term)}); - } - }, - } + const success = terminateChildProcessReportError( + &self.child_process, + self.allocator, + "zig build runner", + .wait, + ); + if (!success) return; self.thread.join(); self.* = undefined; @@ -499,15 +486,7 @@ pub const BuildOnSave = struct { while (true) { const header = transport.receiveMessage(null) catch |err| switch (err) { - error.EndOfStream => { - const stderr = self.child_process.stderr.?.readToEndAlloc(self.allocator, 16 * 1024 * 1024) catch ""; - defer self.allocator.free(stderr); - - if (stderr.len != 0) { - log.err("zig build runner exited with stderr:\n{s}", .{stderr}); - } - return; - }, + error.EndOfStream => return, else => { log.err("failed to receive message from build runner: {}", .{err}); return; @@ -559,3 +538,43 @@ pub const BuildOnSave = struct { try collection.publishDiagnostics(); } }; + +fn terminateChildProcessReportError( + child_process: *std.process.Child, + allocator: std.mem.Allocator, + name: []const u8, + kind: enum { wait, kill }, +) bool { + const stderr = if (child_process.stderr) |stderr| + stderr.readToEndAlloc(allocator, 16 * 1024 * 1024) catch "" + else + ""; + defer 
allocator.free(stderr); + + const term = (switch (kind) { + .wait => child_process.wait(), + .kill => child_process.kill(), + }) catch |err| { + log.warn("Failed to await {s}: {}", .{ name, err }); + return false; + }; + + switch (term) { + .Exited => |code| if (code != 0) { + if (stderr.len != 0) { + log.warn("{s} exited with non-zero status: {}\nstderr:\n{s}", .{ name, code, stderr }); + } else { + log.warn("{s} exited with non-zero status: {}", .{ name, code }); + } + }, + else => { + if (stderr.len != 0) { + log.warn("{s} exited abnormally: {s}\nstderr:\n{s}", .{ name, @tagName(term), stderr }); + } else { + log.warn("{s} exited abnormally: {s}", .{ name, @tagName(term) }); + } + }, + } + + return true; +} From 81bc1fbda2757dc79003308a81437cf31b59290d Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 19 Dec 2024 22:19:09 +0100 Subject: [PATCH 30/31] minor build on save runner tweaks --- src/Server.zig | 3 ++- src/features/diagnostics.zig | 21 ++++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index c4f99c4b9..1f7a044ac 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -803,6 +803,7 @@ const Workspace = struct { if (workspace.build_on_save) |*build_on_save| { if (enable and !args.restart) return; + log.debug("stopped Build-On-Save for '{s}'", .{workspace.uri}); build_on_save.deinit(); workspace.build_on_save = null; } @@ -835,7 +836,7 @@ const Workspace = struct { return; }; - log.info("started Build-On-Save for '{s}'", .{workspace.uri}); + log.info("trying to start Build-On-Save for '{s}'", .{workspace.uri}); } }; diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig index dcc0aa9a4..839d00261 100644 --- a/src/features/diagnostics.zig +++ b/src/features/diagnostics.zig @@ -454,7 +454,15 @@ pub const BuildOnSave = struct { pub fn deinit(self: *BuildOnSave) void { // this write tells the child process to exit self.child_process.stdin.?.writeAll("\xaa") catch |err| { - 
log.warn("failed to send message to zig build runner: {}", .{err}); + if (err != error.BrokenPipe) { + log.warn("failed to send message to zig build runner: {}", .{err}); + } + _ = terminateChildProcessReportError( + &self.child_process, + self.allocator, + "zig build runner", + .kill, + ); return; }; @@ -486,9 +494,12 @@ pub const BuildOnSave = struct { while (true) { const header = transport.receiveMessage(null) catch |err| switch (err) { - error.EndOfStream => return, + error.EndOfStream => { + log.debug("zig build runner process has exited", .{}); + return; + }, else => { - log.err("failed to receive message from build runner: {}", .{err}); + log.err("failed to receive message from zig build runner: {}", .{err}); return; }, }; @@ -501,12 +512,12 @@ pub const BuildOnSave = struct { collection, workspace_path, ) catch |err| { - log.err("failed to handle error bundle message from build runner: {}", .{err}); + log.err("failed to handle error bundle message from zig build runner: {}", .{err}); return; }; }, else => |tag| { - log.warn("received unexpected message from build runner: {}", .{tag}); + log.warn("received unexpected message from zig build runner: {}", .{tag}); }, } } From 3ee022a699e91eda8c541383a96ea56332a78fc4 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Fri, 20 Dec 2024 00:46:18 +0100 Subject: [PATCH 31/31] improve wording of message when using an incompatible Zig version --- src/Server.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index 1f7a044ac..3b776be52 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -1077,13 +1077,13 @@ pub fn updateConfiguration( if (zig_version_is_tagged) { server.showMessage( .Warning, - "Zig {} should be used with ZLS {}.{}.* but ZLS {} is being used.", - .{ zig_version, zig_version.major, zig_version.minor, zls_version }, + "ZLS {} does not support Zig {}. 
A ZLS {}.{} release should be used instead.", + .{ zls_version, zig_version, zig_version.major, zig_version.minor }, ); } else if (zls_version_is_tagged) { server.showMessage( .Warning, - "ZLS {} should be used with Zig {}.{}.* but found Zig {}.", + "ZLS {} should be used with a Zig {}.{} release but found Zig {}.", .{ zls_version, zls_version.major, zls_version.minor, zig_version }, ); } else {