From 88087f572f4c2db5983dff8b30b61f3b980281d5 Mon Sep 17 00:00:00 2001
From: Chris Waterson
Date: Mon, 16 Oct 2023 20:05:21 -0700
Subject: [PATCH] Fix channel_ready

Make sure that we send a `channel_ready` once we complete funding. Add
logging around each of the places where we change the `signer_pending_*`
status. Add more tests.

---
 lightning/src/ln/async_signer_tests.rs | 257 +++++++++++++++++--------
 lightning/src/ln/channel.rs            | 141 +++++++++-----
 lightning/src/ln/channelmanager.rs     |  13 +-
 3 files changed, 282 insertions(+), 129 deletions(-)

diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs
index 5013e8f5b..701ecc436 100644
--- a/lightning/src/ln/async_signer_tests.rs
+++ b/lightning/src/ln/async_signer_tests.rs
@@ -19,10 +19,36 @@ use crate::ln::msgs::ChannelMessageHandler;
 use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
 use crate::util::test_channel_signer::ops;
 
-const OPS: u32 = ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT;
+/// Helper to run operations with a simulated asynchronous signer.
+///
+/// Disables the signer for the specified channel, runs `do_fn`, then re-enables the signer and
+/// calls `signer_unblocked`.
+#[cfg(test)]
+pub fn with_async_signer<'a, DoFn, T>(node: &Node, peer_id: &PublicKey, channel_id: &ChannelId, masks: &Vec<u32>, do_fn: &'a DoFn) -> T
+	where DoFn: Fn() -> T
+{
+	let mask = masks.iter().fold(0, |acc, m| (acc | m));
+	eprintln!("disabling {}", ops::string_from(mask));
+	node.set_channel_signer_ops_available(peer_id, channel_id, mask, false);
+	let res = do_fn();
 
-#[test]
-fn test_funding_created() {
+	// Recompute the channel ID just in case the original ID was temporary.
+	let new_channel_id = {
+		let channels = node.node.list_channels();
+		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
+		channels[0].channel_id
+	};
+
+	for mask in masks {
+		eprintln!("enabling {} and calling signer_unblocked", ops::string_from(*mask));
+		node.set_channel_signer_ops_available(peer_id, &new_channel_id, *mask, true);
+		node.node.signer_unblocked(Some((*peer_id, new_channel_id)));
+	}
+	res
+}
+
+#[cfg(test)]
+fn do_test_funding_created(masks: &Vec<u32>) {
 	// Simulate acquiring the signature for `funding_created` asynchronously.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -43,22 +69,11 @@ fn test_funding_created() {
 	// But! Let's make node[0]'s signer be unavailable: we should *not* broadcast a funding_created
 	// message...
 	let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
-	nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, OPS, false);
-	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
-	check_added_monitors(&nodes[0], 0);
-
-	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-
-	// Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so
-	// we have to dig out the real channel ID.
-	let chan_id = {
-		let channels = nodes[0].node.list_channels();
-		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
-		channels[0].channel_id
-	};
-
-	nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &chan_id, OPS, true);
-	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
+	with_async_signer(&nodes[0], &nodes[1].node.get_our_node_id(), &temporary_channel_id, masks, &|| {
+		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+		check_added_monitors(&nodes[0], 0);
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	});
 
 	let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
@@ -73,7 +88,38 @@
 }
 
 #[test]
-fn test_for_funding_signed() {
+fn test_funding_created_grs() {
+	do_test_funding_created(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_created_gsr() {
+	do_test_funding_created(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_funding_created_rsg() {
+	do_test_funding_created(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_created_rgs() {
+	do_test_funding_created(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_created_srg() {
+	do_test_funding_created(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_created_sgr() {
+	do_test_funding_created(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
+#[cfg(test)]
+fn do_test_funding_signed(masks: &Vec<u32>) {
 	// Simulate acquiring the signature for `funding_signed` asynchronously.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -98,21 +144,11 @@
 	// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
 	// *not* broadcast a `funding_signed`...
-	nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, OPS, false);
-	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
-	check_added_monitors(&nodes[1], 1);
-
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
-	// Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so
-	// we have to dig out the real channel ID.
-	let chan_id = {
-		let channels = nodes[0].node.list_channels();
-		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
-		channels[0].channel_id
-	};
-	nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &chan_id, OPS, true);
-	nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
+	with_async_signer(&nodes[1], &nodes[0].node.get_our_node_id(), &temporary_channel_id, masks, &|| {
+		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+		check_added_monitors(&nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	});
 
 	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
@@ -124,7 +160,38 @@
 }
 
 #[test]
-fn test_commitment_signed() {
+fn test_funding_signed_grs() {
+	do_test_funding_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_gsr() {
+	do_test_funding_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_funding_signed_rsg() {
+	do_test_funding_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_signed_rgs() {
+	do_test_funding_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_srg() {
+	do_test_funding_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_signed_sgr() {
+	do_test_funding_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
+#[cfg(test)]
+fn do_test_commitment_signed(masks: &Vec<u32>) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -152,24 +219,48 @@
 	// Mark dst's signer as unavailable and handle src's commitment_signed. If dst's signer is
 	// offline, it oughtn't yet respond with any updates.
-	dst.set_channel_signer_ops_available(&src.node.get_our_node_id(), &chan_id, OPS, false);
-	dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
-	check_added_monitors(dst, 1);
+	with_async_signer(dst, &src.node.get_our_node_id(), &chan_id, masks, &|| {
+		dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
+		check_added_monitors(dst, 1);
+		assert!(dst.node.get_and_clear_pending_msg_events().is_empty());
+	});
 
-	{
-		let events = dst.node.get_and_clear_pending_msg_events();
-		assert_eq!(events.len(), 0, "expected 0 events to be generated, got {}", events.len());
-	}
+	get_revoke_commit_msgs(&dst, &src.node.get_our_node_id());
+}
 
-	// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
-	dst.set_channel_signer_ops_available(&src.node.get_our_node_id(), &chan_id, OPS, true);
-	dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
+#[test]
+fn test_commitment_signed_grs() {
+	do_test_commitment_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
 
-	get_revoke_commit_msgs(&dst, &src.node.get_our_node_id());
+#[test]
+fn test_commitment_signed_gsr() {
+	do_test_commitment_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_commitment_signed_rsg() {
+	do_test_commitment_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
 }
 
 #[test]
-fn test_funding_signed_0conf() {
+fn test_commitment_signed_rgs() {
+	do_test_commitment_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_commitment_signed_srg() {
+	do_test_commitment_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_commitment_signed_sgr() {
+	do_test_commitment_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
+#[cfg(test)]
+fn do_test_funding_signed_0conf(masks: &Vec<u32>) {
 	// Simulate acquiring the signature for `funding_signed` asynchronously for a zero-conf channel.
 	let mut manually_accept_config = test_default_channel_config();
 	manually_accept_config.manually_accept_inbound_channels = true;
@@ -187,7 +278,6 @@
 	{
 		let events = nodes[1].node.get_and_clear_pending_events();
-		assert_eq!(events.len(), 1, "Expected one event, got {}", events.len());
 		match &events[0] {
 			Event::OpenChannelRequest { temporary_channel_id, .. } => {
 				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(
 					temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None)
 					.expect("Unable to accept inbound channel");
 			},
@@ -196,6 +286,7 @@
 			ev => panic!("Expected OpenChannelRequest, not {:?}", ev)
 		}
+		assert_eq!(events.len(), 1, "Expected one event, got {}", events.len());
 	}
 
 	// nodes[0] <-- accept_channel --- nodes[1]
@@ -212,24 +303,13 @@
 	// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
 	// *not* broadcast a `funding_signed`...
-	nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, OPS, false);
-	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
-	check_added_monitors(&nodes[1], 1);
-
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
-	// Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so
-	// we have to dig out the real channel ID.
-	let chan_id = {
-		let channels = nodes[0].node.list_channels();
-		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
-		channels[0].channel_id
-	};
+	with_async_signer(&nodes[1], &nodes[0].node.get_our_node_id(), &temporary_channel_id, masks, &|| {
+		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+		check_added_monitors(&nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	});
 
 	// At this point, we basically expect the channel to open like a normal zero-conf channel.
-	nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &chan_id, OPS, true);
-	nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
-
 	let (funding_signed, channel_ready_1) = {
 		let events = nodes[1].node.get_and_clear_pending_msg_events();
 		let funding_signed = match &events[0] {
@@ -266,26 +346,37 @@
 	assert_eq!(nodes[1].node.list_usable_channels().len(), 1);
 }
 
-/// Helper to run operations with a simulated asynchronous signer.
-///
-/// Disables the signer for the specified channel and then runs `do_fn`, then re-enables the signer
-/// and calls `signer_unblocked`.
-#[cfg(test)]
-pub fn with_async_signer<'a, DoFn, T>(node: &Node, peer_id: &PublicKey, channel_id: &ChannelId, masks: &Vec<u32>, do_fn: &'a DoFn) -> T
-	where DoFn: Fn() -> T
-{
-	let mask = masks.iter().fold(0, |acc, m| (acc | m));
-	eprintln!("disabling {}", ops::string_from(mask));
-	node.set_channel_signer_ops_available(peer_id, channel_id, mask, false);
-	let res = do_fn();
-	for mask in masks {
-		eprintln!("enabling {} and calling signer_unblocked", ops::string_from(*mask));
-		node.set_channel_signer_ops_available(peer_id, channel_id, *mask, true);
-		node.node.signer_unblocked(Some((*peer_id, *channel_id)));
-	}
-	res
+#[test]
+fn test_funding_signed_0conf_grs() {
+	do_test_funding_signed_0conf(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_0conf_gsr() {
+	do_test_funding_signed_0conf(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_funding_signed_0conf_rsg() {
+	do_test_funding_signed_0conf(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_signed_0conf_rgs() {
+	do_test_funding_signed_0conf(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_0conf_srg() {
+	do_test_funding_signed_0conf(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
 }
 
+#[test]
+fn test_funding_signed_0conf_sgr() {
+	do_test_funding_signed_0conf(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
 #[cfg(test)]
 fn do_test_payment(masks: &Vec<u32>) {
 	// This runs through a one-hop payment from start to finish, simulating an asynchronous signer at
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index ac8a2f565..08f3dfc8c 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1292,14 +1292,20 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		log_trace!(logger, "Retrieving commitment point for {} transaction number {}", self.channel_id(), transaction_number);
 		self.cur_holder_commitment_point = match signer.get_per_commitment_point(transaction_number, &self.secp_ctx) {
 			Ok(point) => {
-				log_trace!(logger, "Commitment point for {} transaction number {} retrieved", self.channel_id(), transaction_number);
-				self.signer_pending_commitment_point = false;
+				if self.signer_pending_commitment_point {
+					log_trace!(logger, "Commitment point for {} transaction number {} retrieved; clearing signer_pending_commitment_point",
+						self.channel_id(), transaction_number);
+					self.signer_pending_commitment_point = false;
+				}
 				Some(point)
 			}
 			Err(_) => {
-				log_trace!(logger, "Commitment point for {} transaction number {} is not available", self.channel_id(), transaction_number);
-				self.signer_pending_commitment_point = true;
+				if !self.signer_pending_commitment_point {
+					log_trace!(logger, "Commitment point for {} transaction number {} is not available; setting signer_pending_commitment_point",
+						self.channel_id(), transaction_number);
+					self.signer_pending_commitment_point = true;
+				}
 				None
 			}
 		};
@@ -1309,14 +1315,20 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		log_trace!(logger, "Retrieving commitment secret for {} transaction number {}", self.channel_id(), releasing_transaction_number);
 		self.prev_holder_commitment_secret = match signer.release_commitment_secret(releasing_transaction_number) {
 			Ok(secret) => {
-				log_trace!(logger, "Commitment secret for {} transaction number {} retrieved", self.channel_id(), releasing_transaction_number);
-				self.signer_pending_released_secret = false;
+				if self.signer_pending_released_secret {
+					log_trace!(logger, "Commitment secret for {} transaction number {} retrieved; clearing signer_pending_released_secret",
+						self.channel_id(), releasing_transaction_number);
+					self.signer_pending_released_secret = false;
+				}
 				Some(secret)
 			}
 			Err(_) => {
-				log_trace!(logger, "Commitment secret for {} transaction number {} is not available", self.channel_id(), releasing_transaction_number);
-				self.signer_pending_released_secret = true;
+				if !self.signer_pending_released_secret {
+					log_trace!(logger, "Commitment secret for {} transaction number {} is not available; setting signer_pending_released_secret",
+						self.channel_id(), releasing_transaction_number);
+					self.signer_pending_released_secret = true;
+				}
 				None
 			}
 		}
@@ -2835,7 +2847,7 @@ impl<SP: Deref> Channel<SP> where
 		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 
-		let need_channel_ready = self.check_get_channel_ready(0).is_some();
+		let need_channel_ready = self.check_get_channel_ready(0, logger).is_some();
 		log_trace!(logger, "funding_signed {} channel_ready", if need_channel_ready { "needs" } else { "does not need" });
 		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 		Ok(channel_monitor)
@@ -4008,7 +4020,7 @@ impl<SP: Deref> Channel<SP> where
 			"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
 		self.context.monitor_pending_channel_ready = false;
 		self.get_channel_ready().or_else(|| {
-			log_trace!(logger, "Monitor was pending channel_ready with no commitment point available; setting signer_pending_channel_ready = true");
+			log_trace!(logger, "Monitor was pending channel_ready with no commitment point available; setting signer_pending_channel_ready");
 			self.context.signer_pending_channel_ready = true;
 			None
 		})
@@ -4034,6 +4046,7 @@ impl<SP: Deref> Channel<SP> where
 		let raa = if self.context.monitor_pending_revoke_and_ack {
 			self.get_last_revoke_and_ack(logger).or_else(|| {
+				log_trace!(logger, "Monitor was pending RAA, but RAA is not available; setting signer_pending_revoke_and_ack");
 				self.context.signer_pending_revoke_and_ack = true;
 				None
 			})
@@ -4046,22 +4059,24 @@ impl<SP: Deref> Channel<SP> where
 		}
 
 		if self.context.monitor_pending_commitment_signed && commitment_update.is_none() {
-			log_debug!(logger, "Monitor was pending_commitment_signed with no commitment update available; setting signer_pending_commitment_update = true");
+			log_trace!(logger, "Monitor was pending_commitment_signed with no commitment update available; setting signer_pending_commitment_update");
 			self.context.signer_pending_commitment_update = true;
 		} else {
 			// If the signer was pending a commitment update, but we happened to get one just now because
 			// the monitor retrieved it, then we can mark the signer as "not pending anymore".
 			if self.context.signer_pending_commitment_update && commitment_update.is_some() {
+				log_trace!(logger, "Signer was pending commitment update, monitor retrieved it: clearing signer_pending_commitment_update");
 				self.context.signer_pending_commitment_update = false;
 			}
 		}
 
 		if self.context.monitor_pending_revoke_and_ack && raa.is_none() {
-			log_debug!(logger, "Monitor was pending_revoke_and_ack with no RAA available; setting signer_pending_revoke_and_ack = true");
+			log_trace!(logger, "Monitor was pending_revoke_and_ack with no RAA available; setting signer_pending_revoke_and_ack");
 			self.context.signer_pending_revoke_and_ack = true;
 		} else {
 			// If the signer was pending a RAA, but we happened to get one just now because the monitor
 			// retrieved it, then we can mark the signer as "not pending anymore".
 			if self.context.signer_pending_revoke_and_ack && raa.is_some() {
+				log_trace!(logger, "Signer was pending RAA, monitor retrieved it: clearing signer_pending_revoke_and_ack");
 				self.context.signer_pending_revoke_and_ack = false;
 			}
 		}
@@ -4139,6 +4154,7 @@ impl<SP: Deref> Channel<SP> where
 		let cu = if self.context.signer_pending_commitment_update {
 			log_trace!(logger, "Attempting to generate pending commitment update...");
 			self.get_last_commitment_update_for_send(logger).map(|cu| {
+				log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update");
 				self.context.signer_pending_commitment_update = false;
 				cu
 			}).ok()
@@ -4147,6 +4163,7 @@ impl<SP: Deref> Channel<SP> where
 		let raa = if self.context.signer_pending_revoke_and_ack && !self.context.signer_pending_commitment_update {
 			log_trace!(logger, "Attempting to generate pending RAA...");
 			self.get_last_revoke_and_ack(logger).map(|raa| {
+				log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack");
 				self.context.signer_pending_revoke_and_ack = false;
 				raa
 			})
@@ -4159,6 +4176,7 @@ impl<SP: Deref> Channel<SP> where
 		let raa = if self.context.signer_pending_revoke_and_ack {
 			log_trace!(logger, "Attempting to generate pending RAA...");
 			self.get_last_revoke_and_ack(logger).map(|raa| {
+				log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack");
 				self.context.signer_pending_revoke_and_ack = false;
 				raa
 			})
@@ -4167,6 +4185,7 @@ impl<SP: Deref> Channel<SP> where
 		let cu = if self.context.signer_pending_commitment_update && !self.context.signer_pending_revoke_and_ack {
 			log_trace!(logger, "Attempting to generate pending commitment update...");
 			self.get_last_commitment_update_for_send(logger).map(|cu| {
+				log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update");
 				self.context.signer_pending_commitment_update = false;
 				cu
 			}).ok()
@@ -4189,6 +4208,7 @@ impl<SP: Deref> Channel<SP> where
 		let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding {
 			log_trace!(logger, "Attempting to generate pending channel ready...");
 			self.get_channel_ready().map(|msg| {
+				log_trace!(logger, "Generated channel_ready; clearing signer_pending_channel_ready");
 				self.context.signer_pending_channel_ready = false;
 				msg
 			})
@@ -4453,6 +4473,7 @@ impl<SP: Deref> Channel<SP> where
 			// We have OurChannelReady set!
 			let channel_ready = self.get_channel_ready();
 			if channel_ready.is_none() {
+				log_trace!(logger, "Could not generate channel_ready during channel_reestablish; setting signer_pending_channel_ready");
 				self.context.signer_pending_channel_ready = true;
 			}
 
@@ -4474,10 +4495,16 @@ impl<SP: Deref> Channel<SP> where
 			None
 		} else {
 			self.get_last_revoke_and_ack(logger).map(|raa| {
-				self.context.signer_pending_revoke_and_ack = false;
+				if self.context.signer_pending_revoke_and_ack {
+					log_trace!(logger, "Generated RAA for channel_reestablish; clearing signer_pending_revoke_and_ack");
+					self.context.signer_pending_revoke_and_ack = false;
+				}
 				raa
 			}).or_else(|| {
-				self.context.signer_pending_revoke_and_ack = true;
+				if !self.context.signer_pending_revoke_and_ack {
+					log_trace!(logger, "Unable to generate RAA for channel_reestablish; setting signer_pending_revoke_and_ack");
+					self.context.signer_pending_revoke_and_ack = true;
+				}
 				None
 			})
 		}
@@ -4499,7 +4526,10 @@ impl<SP: Deref> Channel<SP> where
 			// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
 			log_debug!(logger, "Reconnecting channel at state 1, (re?)sending channel_ready");
 			self.get_channel_ready().or_else(|| {
-				self.context.signer_pending_channel_ready = true;
+				if !self.context.signer_pending_channel_ready {
+					log_trace!(logger, "Unable to generate channel_ready for channel_reestablish; setting signer_pending_channel_ready");
+					self.context.signer_pending_channel_ready = true;
+				}
 				None
 			})
 		} else { None };
@@ -5148,7 +5178,9 @@ impl<SP: Deref> Channel<SP> where
 		self.context.channel_update_status = status;
 	}
 
-	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+	fn check_get_channel_ready<L: Deref>(&mut self, height: u32, logger: &L) -> Option<msgs::ChannelReady>
+		where L::Target: Logger
+	{
 		// Called:
 		//  * always when a new block/transactions are confirmed with the new height
 		//  * when funding is signed with a height of 0
@@ -5165,12 +5197,6 @@ impl<SP: Deref> Channel<SP> where
 			return None;
 		}
 
-		// If we're still pending the signature on a funding transaction, then we're not ready to send a
-		// channel_ready yet.
-		if self.context.signer_pending_funding {
-			return None;
-		}
-
 		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
 		// channel_ready until the entire batch is ready.
 		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
@@ -5199,23 +5225,50 @@ impl<SP: Deref> Channel<SP> where
 			false
 		};
 
-		if need_commitment_update {
-			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
-				if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
-					if let Ok(next_per_commitment_point) = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx) {
-						return Some(msgs::ChannelReady {
-							channel_id: self.context.channel_id,
-							next_per_commitment_point,
-							short_channel_id_alias: Some(self.context.outbound_scid_alias),
-						});
-					}
-					self.context.signer_pending_channel_ready = true;
-				}
-			} else {
-				self.context.monitor_pending_channel_ready = true;
-			}
+		// If we don't need a commitment update, then we don't need a channel_ready.
+		if !need_commitment_update {
+			return None
+		}
+
+		// If a monitor update is in progress, flag that we're pending a channel ready from the monitor.
+		if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+			log_trace!(logger, "Monitor update in progress; setting monitor_pending_channel_ready");
+			self.context.monitor_pending_channel_ready = true;
+			return None;
+		}
+
+		// If the peer is disconnected, then we'll worry about sending channel_ready as part of the
+		// reconnection process.
+		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+			log_trace!(logger, "Peer is disconnected; we'll deal with channel_ready on reconnect");
+			return None
+		}
+
+		// If we're still pending the signature on a funding transaction, then we're not ready to send a
+		// channel_ready yet.
+		if self.context.signer_pending_funding {
+			log_trace!(logger, "Awaiting signer funding; setting signer_pending_channel_ready");
+			self.context.signer_pending_channel_ready = true;
+			return None;
+		}
+
+		// If we're able to get the next per-commitment point from the signer, then return a
+		// channel_ready.
+		let res = self.context.holder_signer.as_ref().get_per_commitment_point(
+			INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
+
+		if let Ok(next_per_commitment_point) = res {
+			Some(msgs::ChannelReady {
+				channel_id: self.context.channel_id,
+				next_per_commitment_point,
+				short_channel_id_alias: Some(self.context.outbound_scid_alias),
+			})
+		} else {
+			// Otherwise, mark us as awaiting the signer to send the channel ready.
+			log_trace!(logger, "Awaiting signer to generate next per_commitment_point; setting signer_pending_channel_ready");
+			self.context.signer_pending_channel_ready = true;
+			None
 		}
-		None
 	}
 
 	/// When a transaction is confirmed, we check whether it is or spends the funding transaction
@@ -5282,7 +5335,7 @@ impl<SP: Deref> Channel<SP> where
 				// If we allow 1-conf funding, we may need to check for channel_ready here and
 				// send it immediately instead of waiting for a best_block_updated call (which
 				// may have already happened for this block).
-				if let Some(channel_ready) = self.check_get_channel_ready(height) {
+				if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
 					log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
 					let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
 					msgs = (Some(channel_ready), announcement_sigs);
@@ -5348,7 +5401,7 @@ impl<SP: Deref> Channel<SP> where
 			self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
 
-			if let Some(channel_ready) = self.check_get_channel_ready(height) {
+			if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
 				let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
 					self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
 				} else { None };
@@ -6616,6 +6669,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 		let open_channel = if self.signer_pending_open_channel {
 			self.context.update_holder_per_commitment(logger);
 			self.get_open_channel(chain_hash.clone()).map(|msg| {
+				log_trace!(logger, "Clearing signer_pending_open_channel");
 				self.signer_pending_open_channel = false;
 				msg
 			})
@@ -7149,7 +7203,6 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		log_info!(logger, "{} funding_signed for peer for channel {}",
 			if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
 
-		let signer_pending_funding = self.context.signer_pending_funding;
 		// Promote the channel to a full-fledged one now that we have updated the state and have a
 		// `ChannelMonitor`.
@@ -7157,7 +7210,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		let mut channel = Channel {
 			context: self.context,
 		};
 
-		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+		let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
+		log_trace!(logger, "funding_created {} channel_ready", if need_channel_ready { "needs" } else { "does not need" });
 		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 
 		Ok((channel, funding_signed, channel_monitor))
@@ -7172,6 +7226,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		let accept_channel = if self.signer_pending_accept_channel {
 			self.context.update_holder_per_commitment(logger);
 			self.generate_accept_channel_message().map(|msg| {
+				log_trace!(logger, "Clearing signer_pending_accept_channel");
 				self.signer_pending_accept_channel = false;
 				msg
 			})
@@ -9761,6 +9816,6 @@ mod tests {
 			ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32,
 		);
-		assert!(node_a_chan.check_get_channel_ready(0).is_some());
+		assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some());
 	}
 }
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 138cdc923..4180a9ded 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -2433,6 +2433,7 @@ where
 		};
 		let opt_msg = channel.get_open_channel(self.chain_hash);
 		if opt_msg.is_none() {
+			log_trace!(self.logger, "Awaiting signer for open_channel; setting signer_pending_open_channel");
 			channel.signer_pending_open_channel = true;
 		}
 
@@ -5877,7 +5878,7 @@ where
 		}
 
-		log_debug!(self.logger, "Outgoing message queue is:");
+		log_debug!(self.logger, "Outgoing message queue is{}", if pending_msg_events.is_empty() { " empty" } else { "..." });
 		for msg in pending_msg_events.iter() {
 			log_debug!(self.logger, "  {:?}", msg);
 		}
@@ -6038,7 +6039,10 @@ where
 					node_id: channel.context.get_counterparty_node_id(),
 					msg
 				}),
-				None => channel.signer_pending_accept_channel = true,
+				None => {
+					log_trace!(self.logger, "Awaiting signer for accept_channel; setting signer_pending_accept_channel");
+					channel.signer_pending_accept_channel = true;
+				},
 			};
 
 			peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
@@ -6197,7 +6201,10 @@ where
 					node_id: channel.context.get_counterparty_node_id(),
 					msg
 				}),
-				None => channel.signer_pending_accept_channel = true,
+				None => {
+					log_trace!(self.logger, "Awaiting signer for accept_channel; setting signer_pending_accept_channel");
+					channel.signer_pending_accept_channel = true;
+				},
 			};
 
 			peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));
-- 
2.39.5
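
For reference, every permutation test added above reduces to the shape sketched here. This is an illustrative sketch only: identifiers such as `nodes`, `chan_id`, and `funding_created_msg` are assumed to come from LDK's functional test fixtures, and the mask constants from `util::test_channel_signer::ops`.

	// Disable all three signer operations at once, run the closure while the
	// signer is offline, then re-enable each op in the order given by `masks`,
	// calling `signer_unblocked` after every step.
	let masks = vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET,
		ops::SIGN_COUNTERPARTY_COMMITMENT];
	with_async_signer(&nodes[1], &nodes[0].node.get_our_node_id(), &chan_id, &masks, &|| {
		// While the signer is unavailable, handling `funding_created` must not
		// emit any outbound messages.
		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	});
	// Once the final op is re-enabled, `signer_unblocked` releases the deferred
	// `funding_signed` and, with this patch, the `channel_ready` as well.

The `_grs`/`_gsr`/`_rsg`/... test suffixes encode exactly this: each letter gives the order in which get_per_commitment_point, release_commitment_secret, and sign_counterparty_commitment come back online, so all six orderings of the three operations are exercised.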