Remove the async_signing cfg flag
Now that the core features required for `async_signing` are in
place, we can go ahead and expose it publicly (rather than behind
a `cfg`-flag). We still don't have full async support for
`get_per_commitment_point`, but only one case in channel
reconnection remains. The overall logic may still have some
hiccups, but it's been in use in production at a major LDK user
for some time now. Thus, it doesn't really make sense to hide it
behind a `cfg`-flag, even if the feature is only 99% complete.
Further, the new paths exposed are very restricted to signing
operations that run async, so the risk to existing users should be
incredibly low.
TheBlueMatt committed Dec 16, 2024
1 parent d1e94bd commit 47ca19d
Showing 6 changed files with 26 additions and 65 deletions.
1 change: 0 additions & 1 deletion Cargo.toml
@@ -61,7 +61,6 @@ check-cfg = [
"cfg(ldk_bench)",
"cfg(ldk_test_vectors)",
"cfg(taproot)",
"cfg(async_signing)",
"cfg(require_route_graph_test)",
"cfg(splicing)",
"cfg(async_payments)",
2 changes: 0 additions & 2 deletions ci/ci-tests.sh
@@ -162,8 +162,6 @@ fi
echo -e "\n\nTest cfg-flag builds"
RUSTFLAGS="--cfg=taproot" cargo test --verbose --color always -p lightning
[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
RUSTFLAGS="--cfg=async_signing" cargo test --verbose --color always -p lightning
[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
RUSTFLAGS="--cfg=splicing" cargo test --verbose --color always -p lightning
[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
RUSTFLAGS="--cfg=async_payments" cargo test --verbose --color always -p lightning
82 changes: 24 additions & 58 deletions lightning/src/ln/channel.rs
@@ -906,7 +906,6 @@ pub(super) struct MonitorRestoreUpdates {
}

/// The return value of `signer_maybe_unblocked`
#[allow(unused)]
pub(super) struct SignerResumeUpdates {
pub commitment_update: Option<msgs::CommitmentUpdate>,
pub revoke_and_ack: Option<msgs::RevokeAndACK>,
@@ -3959,13 +3958,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
self.signer_pending_funding = false;
} else if signature.is_none() {
#[cfg(not(async_signing))] {
panic!("Failed to get signature for funding_signed");
}
#[cfg(async_signing)] {
log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
self.signer_pending_funding = true;
}
log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
self.signer_pending_funding = true;
}

signature.map(|(signature, _)| msgs::FundingSigned {
@@ -6114,7 +6108,6 @@ impl<SP: Deref> Channel<SP> where

/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
#[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
if !self.holder_commitment_point.is_available() {
log_trace!(logger, "Attempting to update holder per-commitment point...");
@@ -6231,21 +6224,16 @@
&self.context.channel_id(), self.holder_commitment_point.transaction_number(),
self.holder_commitment_point.transaction_number() + 2);
}
#[cfg(not(async_signing))] {
panic!("Holder commitment point and per commitment secret must be available when generating revoke_and_ack");
}
#[cfg(async_signing)] {
// Technically if we're at HolderCommitmentPoint::PendingNext,
// we have a commitment point ready to send in an RAA, however we
// choose to wait since if we send RAA now, we could get another
// CS before we have any commitment point available. Blocking our
// RAA here is a convenient way to make sure that post-funding
// we're only ever waiting on one commitment point at a time.
log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
&self.context.channel_id(), self.holder_commitment_point.transaction_number());
self.context.signer_pending_revoke_and_ack = true;
None
}
// Technically if we're at HolderCommitmentPoint::PendingNext,
// we have a commitment point ready to send in an RAA, however we
// choose to wait since if we send RAA now, we could get another
// CS before we have any commitment point available. Blocking our
// RAA here is a convenient way to make sure that post-funding
// we're only ever waiting on one commitment point at a time.
log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
&self.context.channel_id(), self.holder_commitment_point.transaction_number());
self.context.signer_pending_revoke_and_ack = true;
None
}

/// Gets the last commitment update for immediate sending to our peer.
@@ -6316,16 +6304,11 @@
}
update
} else {
#[cfg(not(async_signing))] {
panic!("Failed to get signature for new commitment state");
}
#[cfg(async_signing)] {
if !self.context.signer_pending_commitment_update {
log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
self.context.signer_pending_commitment_update = true;
}
return Err(());
if !self.context.signer_pending_commitment_update {
log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
self.context.signer_pending_commitment_update = true;
}
return Err(());
};
Ok(msgs::CommitmentUpdate {
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
@@ -8362,13 +8345,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
self.context.signer_pending_funding = false;
} else if signature.is_none() {
#[cfg(not(async_signing))] {
panic!("Failed to get signature for new funding creation");
}
#[cfg(async_signing)] {
log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
self.context.signer_pending_funding = true;
}
log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
self.context.signer_pending_funding = true;
};

signature.map(|signature| msgs::FundingCreated {
@@ -8467,14 +8445,9 @@
holder_commitment_point.current_point()
},
_ => {
#[cfg(not(async_signing))] {
panic!("Failed getting commitment point for open_channel message");
}
#[cfg(async_signing)] {
log_trace!(_logger, "Unable to generate open_channel message, waiting for commitment point");
self.signer_pending_open_channel = true;
return None;
}
log_trace!(_logger, "Unable to generate open_channel message, waiting for commitment point");
self.signer_pending_open_channel = true;
return None;
}
};
let keys = self.context.get_holder_pubkeys();
@@ -8562,7 +8535,6 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {

/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
#[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(
&mut self, chain_hash: ChainHash, logger: &L
) -> (Option<msgs::OpenChannel>, Option<msgs::FundingCreated>) where L::Target: Logger {
@@ -8723,14 +8695,9 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
holder_commitment_point.current_point()
},
_ => {
#[cfg(not(async_signing))] {
panic!("Failed getting commitment point for accept_channel message");
}
#[cfg(async_signing)] {
log_trace!(_logger, "Unable to generate accept_channel message, waiting for commitment point");
self.signer_pending_accept_channel = true;
return None;
}
log_trace!(_logger, "Unable to generate accept_channel message, waiting for commitment point");
self.signer_pending_accept_channel = true;
return None;
}
};
let keys = self.context.get_holder_pubkeys();
@@ -8833,7 +8800,6 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {

/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
#[allow(unused)]
pub fn signer_maybe_unblocked<L: Deref>(
&mut self, logger: &L
) -> Option<msgs::AcceptChannel> where L::Target: Logger {
1 change: 0 additions & 1 deletion lightning/src/ln/channelmanager.rs
@@ -9512,7 +9512,6 @@ where
/// attempted in every channel, or in the specifically provided channel.
///
/// [`ChannelSigner`]: crate::sign::ChannelSigner
#[cfg(async_signing)]
pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

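With the `cfg`-flag gone, `signer_unblocked` is part of the normal public API, so an application driving an asynchronous or remote signer calls it whenever a previously-unavailable signature becomes ready. The snippet below is a minimal sketch of that call and is not part of this commit; `MyChannelManager` is a stand-in for the application's concrete `ChannelManager` alias, and import paths may vary slightly between LDK releases.

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::ln::types::ChannelId;

// Stand-in for the application's concrete `ChannelManager` alias so this
// sketch compiles on its own; the real `ChannelManager::signer_unblocked`
// has the same shape.
struct MyChannelManager;
impl MyChannelManager {
    fn signer_unblocked(&self, _channel_opt: Option<(PublicKey, ChannelId)>) {}
}

/// Invoked by the application once its async signer has produced a signature
/// (or per-commitment point) that LDK previously reported as unavailable.
fn on_signer_ready(
    channel_manager: &MyChannelManager, counterparty_node_id: PublicKey,
    channel_id: ChannelId,
) {
    // Retry only the channel that was blocked on the signer...
    channel_manager.signer_unblocked(Some((counterparty_node_id, channel_id)));
    // ...or, if the application doesn't track which channel was waiting,
    // retry everything:
    // channel_manager.signer_unblocked(None);
}
```

Because the per-channel `signer_maybe_unblocked` paths in the diff above only act when a `signer_pending_*` flag is set, a call that finds nothing pending should simply have nothing to send.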
2 changes: 1 addition & 1 deletion lightning/src/ln/mod.rs
@@ -85,7 +85,7 @@ mod monitor_tests;
#[cfg(test)]
#[allow(unused_mut)]
mod shutdown_tests;
#[cfg(all(test, async_signing))]
#[cfg(test)]
#[allow(unused_mut)]
mod async_signer_tests;
#[cfg(test)]
3 changes: 1 addition & 2 deletions lightning/src/sign/mod.rs
@@ -728,8 +728,7 @@ pub trait ChannelSigner {
/// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards.
///
/// If the signer returns `Err`, then the user is responsible for either force-closing the channel
/// or calling `ChannelManager::signer_unblocked` (this method is only available when the
/// `async_signing` cfg flag is enabled) once the signature is ready.
/// or calling `ChannelManager::signer_unblocked` once the signature is ready.
///
// TODO: link to `signer_unblocked` once the cfg flag is removed
fn get_per_commitment_point(
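For reference, the contract the updated documentation describes (return `Err` when a result isn't ready yet, then have the application call `ChannelManager::signer_unblocked` once it is) looks roughly like this on the signer side. This sketch is not from the commit: `RemoteSigner` and its cache are assumptions, only this one method is mirrored rather than a full `ChannelSigner` implementation, and the exact trait signature should be checked against the LDK version in use.

```rust
use bitcoin::secp256k1::{self, PublicKey, Secp256k1};
use std::collections::HashMap;
use std::sync::Mutex;

/// Illustrative signer backend whose per-commitment points come from a remote
/// HSM and are cached once they arrive (all names here are assumptions).
struct RemoteSigner {
    /// Points the HSM has already returned, keyed by commitment index.
    ready_points: Mutex<HashMap<u64, PublicKey>>,
}

impl RemoteSigner {
    /// Mirrors the shape of `ChannelSigner::get_per_commitment_point`: hand
    /// back the point if it is already cached, otherwise return `Err(())` so
    /// LDK withholds the dependent message until the application calls
    /// `ChannelManager::signer_unblocked`.
    fn get_per_commitment_point(
        &self, idx: u64, _secp_ctx: &Secp256k1<secp256k1::All>,
    ) -> Result<PublicKey, ()> {
        match self.ready_points.lock().unwrap().get(&idx) {
            Some(point) => Ok(*point),
            None => {
                // Ask the HSM to derive the point for `idx` here (not shown),
                // then report "not yet available".
                Err(())
            },
        }
    }
}
```

As the commit message notes, one case during channel reconnection still cannot tolerate an `Err` from `get_per_commitment_point`; everywhere else, returning `Err` now defers the affected message rather than panicking.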
