Commit 3f8d836
embrace RLS even harder
xdBronch committed Jan 4, 2025
1 parent 19421e0 commit 3f8d836
Showing 26 changed files with 161 additions and 161 deletions.
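
The pattern throughout is the same: spell the type once at the declaration and let Zig's result location semantics (RLS) infer it at the initializer, so `T{ ... }` becomes `.{ ... }` and `&[_]T{ ... }` becomes `&.{ ... }`. A minimal sketch of both forms, using illustrative types rather than anything from this repository:

const std = @import("std");

const Options = struct {
    name: []const u8 = "zls",
    verbose: bool = false,
};

fn countFlags(flags: []const []const u8) usize {
    return flags.len;
}

pub fn main() void {
    // Before: the type name is repeated even though the
    // declaration already pins it down.
    const before: Options = Options{ .verbose = true };

    // After: RLS carries `Options` from the result location,
    // so an anonymous literal is enough.
    const after: Options = .{ .verbose = true };

    // Call sites work the same way: the parameter type gives
    // `&.{ ... }` its element type and length.
    const n = countFlags(&.{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });

    std.debug.print("{} {} {}\n", .{ before.verbose, after.verbose, n });
}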
4 changes: 2 additions & 2 deletions build.zig
@@ -359,9 +359,9 @@ fn getTracyModule(
 
     // On mingw, we need to opt into windows 7+ to get some features required by tracy.
     const tracy_c_flags: []const []const u8 = if (options.target.result.isMinGW())
-        &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" }
+        &.{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" }
     else
-        &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" };
+        &.{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" };
 
     tracy_module.addIncludePath(tracy_dependency.path(""));
     tracy_module.addCSourceFile(.{

2 changes: 1 addition & 1 deletion src/BuildAssociatedConfig.zig
@@ -14,7 +14,7 @@ pub const BuildOption = struct {
             try allocator.dupe(u8, val)
         else
             null;
-        return BuildOption{
+        return .{
            .name = copy_name,
            .value = copy_value,
        };

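Return position gets the same treatment: the declared return type supplies the result location, so `return BuildOption{ ... }` can drop the type name. A standalone sketch under an assumed, simplified `BuildOption` shape:

const std = @import("std");

const BuildOption = struct {
    name: []const u8,
    value: ?[]const u8 = null,
};

// The declared return type is the result location for the
// `return` expression, so no type name is needed there.
fn makeOption(name: []const u8) BuildOption {
    return .{ .name = name };
}

test "return-position decl literal" {
    const opt = makeOption("target");
    try std.testing.expectEqualStrings("target", opt.name);
    try std.testing.expect(opt.value == null);
}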
8 changes: 4 additions & 4 deletions src/DocumentScope.zig
@@ -493,10 +493,10 @@ const ScopeContext = struct {
             .loc = loc,
             .parent_scope = context.current_scope,
             .child_scopes = .{
-                .small = [_]Scope.OptionalIndex{.none} ** Scope.ChildScopes.small_size,
+                .small = @splat(.none),
             },
             .child_declarations = .{
-                .small = [_]Declaration.OptionalIndex{.none} ** Scope.ChildDeclarations.small_size,
+                .small = @splat(.none),
             },
         });
         const new_scope_index: Scope.Index = @enumFromInt(context.doc_scope.scopes.len - 1);

@@ -896,7 +896,7 @@ noinline fn walkContainerDecl(
     const gop = try context.doc_scope.global_enum_set.getOrPutContext(
         context.allocator,
         main_token,
-        IdentifierTokenContext{ .tree = tree },
+        .{ .tree = tree },
     );
     if (!gop.found_existing) {
         gop.key_ptr.* = main_token;

@@ -963,7 +963,7 @@ noinline fn walkErrorSetNode(
     const gop = try context.doc_scope.global_error_set.getOrPutContext(
         context.allocator,
         identifier_token,
-        IdentifierTokenContext{ .tree = tree },
+        .{ .tree = tree },
     );
     if (!gop.found_existing or token_tags[identifier_token - 1] == .doc_comment) {
         // a token with a doc comment takes priority.

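The `@splat(.none)` hunks rely on the same inference: the destination field fixes the array's length and element type, so `@splat` can replace the `[_]T{.none} ** n` repetition (array `@splat` landed in the 0.14 dev cycle). A tiny standalone check of the builtin, with a made-up buffer size standing in for `Scope.ChildScopes.small_size`:

const std = @import("std");

test "@splat infers array length and element type from the result location" {
    const Slot = enum { none, some };
    // Hypothetical 8-slot buffer; the real size lives in DocumentScope.zig.
    const small: [8]Slot = @splat(.none);
    for (small) |slot| try std.testing.expectEqual(Slot.none, slot);
}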
12 changes: 6 additions & 6 deletions src/DocumentStore.zig
@@ -184,7 +184,7 @@ pub const Handle = struct {
     /// private field
     impl: struct {
         /// @bitCast from/to `Status`
-        status: std.atomic.Value(u32) = .init(@bitCast(Status{})),
+        status: std.atomic.Value(u32) = .init(0),
         /// TODO can we avoid storing one allocator per Handle?
         allocator: std.mem.Allocator,
 

@@ -234,14 +234,14 @@ pub const Handle = struct {
         /// all other threads will wait until the given thread has computed the `std.zig.Zir` before reading it.
         /// true if `handle.impl.zir` has been set
         has_zir: bool = false,
-        zir_outdated: bool = undefined,
+        zir_outdated: bool = false,
         /// true if a thread has acquired the permission to compute the `std.zig.Zoir`
         has_zoir_lock: bool = false,
         /// all other threads will wait until the given thread has computed the `std.zig.Zoir` before reading it.
         /// true if `handle.impl.zoir` has been set
         has_zoir: bool = false,
-        zoir_outdated: bool = undefined,
-        _: u23 = undefined,
+        zoir_outdated: bool = false,
+        _: u23 = 0,
     };
 
     pub const ZirOrZoirStatus = enum {

@@ -1090,7 +1090,7 @@ fn loadBuildConfiguration(self: *DocumentStore, build_file_uri: Uri) !std.json.P
     errdefer build_config.deinit();
 
     for (build_config.value.packages) |*pkg| {
-        pkg.path = try std.fs.path.resolve(build_config.arena.allocator(), &[_][]const u8{ build_file_path, "..", pkg.path });
+        pkg.path = try std.fs.path.resolve(build_config.arena.allocator(), &.{ build_file_path, "..", pkg.path });
     }
 
     return build_config;

@@ -1150,7 +1150,7 @@ fn createBuildFile(self: *DocumentStore, uri: Uri) error{OutOfMemory}!BuildFile
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
 
-    var build_file = BuildFile{
+    var build_file: BuildFile = .{
         .uri = try self.allocator.dupe(u8, uri),
     };
 

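The `Status` hunks make the flag defaults concrete (`false` and `0` instead of `undefined`), which is what lets the atomic start from a plain `.init(0)`: the default `Status` now bit-casts to zero. A reduced sketch with an abbreviated, assumed field layout (the real struct has more flags than shown here):

const std = @import("std");

test "an all-zero packed struct default bit-casts to 0" {
    // Abbreviated stand-in for Handle.Status.
    const Status = packed struct(u32) {
        has_zir: bool = false,
        zir_outdated: bool = false,
        has_zoir_lock: bool = false,
        has_zoir: bool = false,
        zoir_outdated: bool = false,
        _: u27 = 0,
    };
    try std.testing.expectEqual(@as(u32, 0), @as(u32, @bitCast(Status{})));
}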
14 changes: 7 additions & 7 deletions src/Server.zig
@@ -599,7 +599,7 @@ fn initializeHandler(server: *Server, arena: std.mem.Allocator, request: types.I
         .renameProvider = .{ .bool = true },
         .completionProvider = .{
             .resolveProvider = false,
-            .triggerCharacters = &[_][]const u8{ ".", ":", "@", "]", "\"", "/" },
+            .triggerCharacters = &.{ ".", ":", "@", "]", "\"", "/" },
             .completionItem = .{ .labelDetailsSupport = true },
         },
         .documentHighlightProvider = .{ .bool = true },

@@ -690,7 +690,7 @@ fn registerCapability(server: *Server, method: []const u8) Error!void {
         .{ .string = id },
         "client/registerCapability",
         types.RegistrationParams{ .registrations = &.{
-            types.Registration{
+            .{
                 .id = id,
                 .method = method,
             },

@@ -1320,7 +1320,7 @@ fn resolveConfiguration(
     };
     defer allocator.free(cache_dir_path);
 
-    config.global_cache_path = try std.fs.path.join(config_arena, &[_][]const u8{ cache_dir_path, "zls" });
+    config.global_cache_path = try std.fs.path.join(config_arena, &.{ cache_dir_path, "zls" });
 
     std.fs.cwd().makePath(config.global_cache_path.?) catch |err| {
         log.warn("failed to create directory '{s}': {}", .{ config.global_cache_path.?, err });

@@ -1473,7 +1473,7 @@ fn saveDocumentHandler(server: *Server, arena: std.mem.Allocator, notification:
     const handle = server.document_store.getHandle(uri) orelse return;
     var text_edits = try server.autofix(arena, handle);
 
-    var workspace_edit = types.WorkspaceEdit{ .changes = .{} };
+    var workspace_edit: types.WorkspaceEdit = .{ .changes = .{} };
     try workspace_edit.changes.?.map.putNoClobber(arena, uri, try text_edits.toOwnedSlice(arena));
 
     const json_message = try server.sendToClientRequest(

@@ -1885,12 +1885,12 @@ fn isBlockingMessage(msg: Message) bool {
 pub fn create(allocator: std.mem.Allocator) !*Server {
     const server = try allocator.create(Server);
     errdefer server.destroy();
-    server.* = Server{
+    server.* = .{
         .allocator = allocator,
         .config = .{},
         .document_store = .{
             .allocator = allocator,
-            .config = .fromMainConfig(Config{}),
+            .config = .fromMainConfig(.{}),
             .thread_pool = if (zig_builtin.single_threaded) {} else undefined, // set below
             .diagnostics_collection = &server.diagnostics_collection,
         },

@@ -2114,7 +2114,7 @@ fn processMessageReportError(server: *Server, message: Message) ?[]const u8 {
     }
 
     switch (message) {
-        .request => |request| return server.sendToClientResponseError(request.id, lsp.JsonRPCMessage.Response.Error{
+        .request => |request| return server.sendToClientResponseError(request.id, .{
             .code = @enumFromInt(switch (err) {
                 error.OutOfMemory => @intFromEnum(types.ErrorCodes.InternalError),
                 error.ParseError => @intFromEnum(types.ErrorCodes.ParseError),

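One more RLS facet shows up in `create` and in the `status` default above: a decl literal like `.init(0)` or `.fromMainConfig(.{})` resolves declarations through the result type, not just field names. A small sketch using `std.atomic.Value`:

const std = @import("std");

const Counter = struct {
    // `.init(0)` resolves to `std.atomic.Value(u32).init(0)`
    // because the field type is already known.
    count: std.atomic.Value(u32) = .init(0),
};

test "decl literal resolves through the result type" {
    const c: Counter = .{};
    try std.testing.expectEqual(@as(u32, 0), c.count.load(.monotonic));
}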
(diffs for the remaining 21 changed files were not loaded)