Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Various fixes for hyperlink instability #2156

Merged
merged 11 commits into from
Aug 28, 2024
Merged
8 changes: 8 additions & 0 deletions src/renderer/Metal.zig
Original file line number Diff line number Diff line change
Expand Up @@ -891,6 +891,14 @@ pub fn updateFrame(

// Update all our data as tightly as possible within the mutex.
var critical: Critical = critical: {
// const start = try std.time.Instant.now();
// const start_micro = std.time.microTimestamp();
// defer {
// const end = std.time.Instant.now() catch unreachable;
// // "[updateFrame critical time] <START us>\t<TIME_TAKEN us>"
// std.log.err("[updateFrame critical time] {}\t{}", .{start_micro, end.since(start) / std.time.ns_per_us});
// }

state.mutex.lock();
defer state.mutex.unlock();

Expand Down
102 changes: 14 additions & 88 deletions src/terminal/PageList.zig
Original file line number Diff line number Diff line change
Expand Up @@ -416,93 +416,25 @@ pub fn clone(
chunk.page.data.capacity,
&page_size,
);
assert(page.data.capacity.rows >= chunk.page.data.capacity.rows);
assert(page.data.capacity.rows >= chunk.end - chunk.start);
defer page.data.assertIntegrity();
page.data.size.rows = chunk.page.data.size.rows;
page.data.size.rows = chunk.end - chunk.start;
try page.data.cloneFrom(
&chunk.page.data,
0,
chunk.page.data.size.rows,
chunk.start,
chunk.end,
);

page_list.append(page);

// If this is a full page then we're done.
if (chunk.fullPage()) {
total_rows += page.data.size.rows;

// Updating tracked pins is easy, we just change the page
// pointer but all offsets remain the same.
if (opts.tracked_pins) |remap| {
const pin_keys = self.tracked_pins.keys();
for (pin_keys) |p| {
if (p.page != chunk.page) continue;
const new_p = try pool.pins.create();
new_p.* = p.*;
new_p.page = page;
try remap.putNoClobber(p, new_p);
try tracked_pins.putNoClobber(pool.alloc, new_p, {});
}
}

continue;
}

// If this is just a shortened chunk off the end we can just
// shorten the size. We don't worry about clearing memory here because
// as the page grows the memory will be reclaimable because the data
// is still valid.
if (chunk.start == 0) {
page.data.size.rows = @intCast(chunk.end);
total_rows += chunk.end;
total_rows += page.data.size.rows;

// Updating tracked pins for the pins that are in the shortened chunk.
if (opts.tracked_pins) |remap| {
const pin_keys = self.tracked_pins.keys();
for (pin_keys) |p| {
if (p.page != chunk.page or
p.y >= chunk.end) continue;
const new_p = try pool.pins.create();
new_p.* = p.*;
new_p.page = page;
try remap.putNoClobber(p, new_p);
try tracked_pins.putNoClobber(pool.alloc, new_p, {});
}
}

continue;
}

// We want to maintain the dirty bits from the original page so
// instead of setting a range we grab the dirty bit and then
// set it on the new page in the new location.
var dirty = page.data.dirtyBitSet();

// Kind of slow, we want to shift the rows up in the page up to
// end and then resize down.
const rows = page.data.rows.ptr(page.data.memory);
const len = chunk.end - chunk.start;
for (0..len) |i| {
const src: *Row = &rows[i + chunk.start];
const dst: *Row = &rows[i];
const old_dst = dst.*;
dst.* = src.*;
src.* = old_dst;
dirty.setValue(i, dirty.isSet(i + chunk.start));
}

// We need to clear the rows we're about to truncate.
for (len..page.data.size.rows) |i| {
page.data.clearCells(&rows[i], 0, page.data.size.cols);
}

page.data.size.rows = @intCast(len);
total_rows += len;

// Updating tracked pins
// Remap our tracked pins by changing the page and
// offsetting the Y position based on the chunk start.
if (opts.tracked_pins) |remap| {
const pin_keys = self.tracked_pins.keys();
for (pin_keys) |p| {
// We're only interested in pins that were within the chunk.
if (p.page != chunk.page or
p.y < chunk.start or
p.y >= chunk.end) continue;
Expand Down Expand Up @@ -948,17 +880,15 @@ const ReflowCursor = struct {
},
}

// Prevent integrity checks from tripping
// while copying graphemes and hyperlinks.
if (comptime std.debug.runtime_safety) {
self.page_cell.style_id = stylepkg.default_id;
}
// These would cause issues by trying to clone managed memory that
// isn't set if the current dst row needs to be moved to a new page.
// They'll be fixed up once we properly copy the relevant memory.
self.page_cell.content_tag = .codepoint;
self.page_cell.hyperlink = false;
self.page_cell.style_id = stylepkg.default_id;

// Copy grapheme data.
if (cell.content_tag == .codepoint_grapheme) {
// The tag is asserted to be .codepoint in setGraphemes.
self.page_cell.content_tag = .codepoint;

// Copy the graphemes
const cps = src_page.lookupGrapheme(cell).?;

Expand All @@ -981,8 +911,6 @@ const ReflowCursor = struct {
}
}

self.page_row.grapheme = true;

// This shouldn't fail since we made sure we have space above.
try self.page.setGraphemes(self.page_row, self.page_cell, cps);
}
Expand Down Expand Up @@ -1015,8 +943,6 @@ const ReflowCursor = struct {
);
} orelse src_id;

self.page_row.hyperlink = true;

// We expect this to succeed due to the
// hyperlinkCapacity check we did before.
try self.page.setHyperlink(
Expand Down
Loading
Loading