X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=5035384c18d7bc9b352c3da28ab66d88c9f5ad14;hb=2f798f6cc0099a10f2f8e4a3ea0f3fab6d51a612;hp=fb1f6b4a8481d33c82286c5f52ddef7c16f6d463;hpb=ee57738ca8564a31d9f919bd4b642d1449bd61a1;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index fb1f6b4a..5035384c 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -120,7 +120,10 @@ pub(super) struct PendingHTLCInfo {
 	pub(super) routing: PendingHTLCRouting,
 	pub(super) incoming_shared_secret: [u8; 32],
 	payment_hash: PaymentHash,
+	/// Amount received
 	pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
+	/// Sender intended amount to forward or receive (actual amount received
+	/// may overshoot this in either case)
 	pub(super) outgoing_amt_msat: u64,
 	pub(super) outgoing_cltv_value: u32,
 }
@@ -192,6 +195,9 @@ struct ClaimableHTLC {
 	cltv_expiry: u32,
 	/// The amount (in msats) of this MPP part
 	value: u64,
+	/// The amount (in msats) that the sender intended to be sent in this MPP
+	/// part (used for validating total MPP amount)
+	sender_intended_value: u64,
 	onion_payload: OnionPayload,
 	timer_ticks: u8,
 	/// The total value received for a payment (sum of all MPP parts if the payment is a MPP).
@@ -280,7 +286,6 @@ pub(crate) enum HTLCSource {
 		/// doing a double-pass on route when we get a failure back
 		first_hop_htlc_msat: u64,
 		payment_id: PaymentId,
-		payment_secret: Option<PaymentSecret>,
 	},
 }
 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
@@ -291,12 +296,11 @@ impl core::hash::Hash for HTLCSource {
 				0u8.hash(hasher);
 				prev_hop_data.hash(hasher);
 			},
-			HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat } => {
+			HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
 				1u8.hash(hasher);
 				path.hash(hasher);
 				session_priv[..].hash(hasher);
 				payment_id.hash(hasher);
-				payment_secret.hash(hasher);
 				first_hop_htlc_msat.hash(hasher);
 			},
 		}
@@ -311,7 +315,6 @@ impl HTLCSource {
 			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
 			first_hop_htlc_msat: 0,
 			payment_id: PaymentId([2; 32]),
-			payment_secret: None,
 		}
 	}
 }
@@ -1490,18 +1493,31 @@ macro_rules! send_channel_ready {
 	}}
 }
 
+macro_rules! emit_channel_pending_event {
+	($locked_events: expr, $channel: expr) => {
+		if $channel.should_emit_channel_pending_event() {
+			$locked_events.push(events::Event::ChannelPending {
+				channel_id: $channel.channel_id(),
+				former_temporary_channel_id: $channel.temporary_channel_id(),
+				counterparty_node_id: $channel.get_counterparty_node_id(),
+				user_channel_id: $channel.get_user_id(),
+				funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+			});
+			$channel.set_channel_pending_event_emitted();
+		}
+	}
+}
+
 macro_rules! emit_channel_ready_event {
-	($self: expr, $channel: expr) => {
+	($locked_events: expr, $channel: expr) => {
 		if $channel.should_emit_channel_ready_event() {
-			{
-				let mut pending_events = $self.pending_events.lock().unwrap();
-				pending_events.push(events::Event::ChannelReady {
-					channel_id: $channel.channel_id(),
-					user_channel_id: $channel.get_user_id(),
-					counterparty_node_id: $channel.get_counterparty_node_id(),
-					channel_type: $channel.get_channel_type().clone(),
-				});
-			}
+			debug_assert!($channel.channel_pending_event_emitted());
+			$locked_events.push(events::Event::ChannelReady {
+				channel_id: $channel.channel_id(),
+				user_channel_id: $channel.get_user_id(),
+				counterparty_node_id: $channel.get_counterparty_node_id(),
+				channel_type: $channel.get_channel_type().clone(),
+			});
 			$channel.set_channel_ready_event_emitted();
 		}
 	}
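The two macros above establish an ordering guarantee: `ChannelPending` is always emitted before `ChannelReady` (the `debug_assert!` enforces it), and both are now pushed onto an already-locked event queue passed in by the caller. A minimal sketch of consuming the pair in an event handler follows; only the event variants and their fields come from the diff above, the handler body itself is hypothetical:

    // Sketch: handling the funding-lifecycle events in order.
    match event {
        Event::ChannelPending { channel_id, funding_txo, .. } => {
            // The funding transaction has been negotiated and is broadcastable;
            // start watching `funding_txo` for confirmations.
        },
        Event::ChannelReady { channel_id, .. } => {
            // The channel is live; payments can now be routed over it.
        },
        _ => {},
    }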
@@ -2181,7 +2197,7 @@ where
 						payment_hash,
 						incoming_shared_secret: shared_secret,
 						incoming_amt_msat: Some(amt_msat),
-						outgoing_amt_msat: amt_msat,
+						outgoing_amt_msat: hop_data.amt_to_forward,
 						outgoing_cltv_value: hop_data.outgoing_cltv_value,
 					})
 				}
@@ -2537,7 +2553,6 @@ where
 						session_priv: session_priv.clone(),
 						first_hop_htlc_msat: htlc_msat,
 						payment_id,
-						payment_secret: payment_secret.clone(),
 					}, onion_packet, &self.logger);
 				match break_chan_entry!(self, send_res, chan) {
 					Some(monitor_update) => {
@@ -3261,7 +3276,7 @@ where
 					HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 						prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
 						forward_info: PendingHTLCInfo {
-							routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, ..
+							routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, ..
 						}
 					}) => {
 						let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
@@ -3283,7 +3298,11 @@ where
 								incoming_packet_shared_secret: incoming_shared_secret,
 								phantom_shared_secret,
 							},
-							value: outgoing_amt_msat,
+							// We differentiate the received value from the sender intended value
+							// if possible so that we don't prematurely mark MPP payments complete
+							// if routing nodes overpay
+							value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
+							sender_intended_value: outgoing_amt_msat,
 							timer_ticks: 0,
 							total_value_received: None,
 							total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
@@ -3339,9 +3358,9 @@ where
 											continue
 										}
 									}
-									let mut total_value = claimable_htlc.value;
+									let mut total_value = claimable_htlc.sender_intended_value;
 									for htlc in htlcs.iter() {
-										total_value += htlc.value;
+										total_value += htlc.sender_intended_value;
 										match &htlc.onion_payload {
 											OnionPayload::Invoice { .. } => {
 												if htlc.total_msat != $payment_data.total_msat {
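`sender_intended_value` exists because a routing node may deliver more than the onion's `amt_to_forward` (an overpayment). Receipt bookkeeping therefore carries both numbers: `value`, what actually arrived, and `sender_intended_value`, what the sender meant to arrive. Whether an MPP payment is complete must be judged on the intended amounts, otherwise an overpaying hop could push the received sum across `total_msat` before all parts are present. A condensed illustration of the accumulation above (the free function is illustrative, not LDK API):

    // Illustrative helper: mirrors the `total_value` accumulation in the hunk above.
    fn mpp_total_intended(parts: &[ClaimableHTLC], newest: &ClaimableHTLC) -> u64 {
        newest.sender_intended_value
            + parts.iter().map(|htlc| htlc.sender_intended_value).sum::<u64>()
    }
    // The payment is complete once this sum reaches `total_msat`; summing the
    // received `value` instead could cross the threshold early if a routing
    // node overpaid one part.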
@@ -3354,9 +3373,11 @@ where
 											_ => unreachable!(),
 										}
 									}
+									// The condition determining whether an MPP is complete must
+									// match exactly the condition used in `timer_tick_occurred`
 									if total_value >= msgs::MAX_VALUE_MSAT {
 										fail_htlc!(claimable_htlc, payment_hash);
-									} else if total_value - claimable_htlc.value >= $payment_data.total_msat {
+									} else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
 										log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
 											log_bytes!(payment_hash.0));
 										fail_htlc!(claimable_htlc, payment_hash);
@@ -3431,7 +3452,7 @@ where
 										new_events.push(events::Event::PaymentClaimable {
 											receiver_node_id: Some(receiver_node_id),
 											payment_hash,
-											amount_msat: outgoing_amt_msat,
+											amount_msat,
 											purpose,
 											via_channel_id: Some(prev_channel_id),
 											via_user_channel_id: Some(prev_user_channel_id),
@@ -3691,7 +3712,9 @@ where
 				if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
 					// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
 					// In this case we're not going to handle any timeouts of the parts here.
-					if htlcs[0].total_msat <= htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+					// This condition determining whether the MPP is complete here must match
+					// exactly the condition used in `process_pending_htlc_forwards`.
+					if htlcs[0].total_msat <= htlcs.iter().fold(0, |total, htlc| total + htlc.sender_intended_value) {
 						return true;
 					} else if htlcs.into_iter().any(|htlc| {
 						htlc.timer_ticks += 1;
@@ -4170,6 +4193,7 @@ where
 						claim_from_onchain_tx: from_onchain,
 						prev_channel_id,
 						next_channel_id,
+						outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
 					}})
 				} else { None }
 			});
@@ -4238,8 +4262,6 @@ where
 			});
 		}
 
-		emit_channel_ready_event!(self, channel);
-
 		macro_rules! handle_cs { () => {
 			if let Some(update) = commitment_update {
 				pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -4272,6 +4294,12 @@ where
 			self.tx_broadcaster.broadcast_transaction(&tx);
 		}
 
+		{
+			let mut pending_events = self.pending_events.lock().unwrap();
+			emit_channel_pending_event!(pending_events, channel);
+			emit_channel_ready_event!(pending_events, channel);
+		}
+
 		htlc_forwards
 	}
 
@@ -4696,7 +4724,10 @@ where
 					}
 				}
 
-				emit_channel_ready_event!(self, chan.get_mut());
+				{
+					let mut pending_events = self.pending_events.lock().unwrap();
+					emit_channel_ready_event!(pending_events, chan.get_mut());
+				}
 
 				Ok(())
 			},
@@ -6021,7 +6052,10 @@ where
 				}
 			}
 
-			emit_channel_ready_event!(self, channel);
+			{
+				let mut pending_events = self.pending_events.lock().unwrap();
+				emit_channel_ready_event!(pending_events, channel);
+			}
 
 			if let Some(announcement_sigs) = announcement_sigs {
 				log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
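With `outbound_amount_forwarded_msat` added to `PaymentForwarded`, a forwarding node can account for its routing income per HTLC. A sketch of consuming it — the new field and `claim_from_onchain_tx` come from the diff, and `fee_earned_msat` is assumed to already exist on the event in this version:

    if let Event::PaymentForwarded {
        fee_earned_msat, outbound_amount_forwarded_msat, claim_from_onchain_tx, ..
    } = event {
        // Both amounts are Options: they can be unknown, e.g. when the forward
        // was resolved via an on-chain claim rather than cooperatively.
        if let (Some(fee), Some(amt)) = (fee_earned_msat, outbound_amount_forwarded_msat) {
            println!("forwarded {} msat onward, earning {} msat (onchain claim: {})",
                amt, fee, claim_from_onchain_tx);
        }
    }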
@@ -6132,34 +6166,11 @@ where
 		}
 	}
 
-	/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
-	/// indicating whether persistence is necessary. Only one listener on
-	/// [`await_persistable_update`], [`await_persistable_update_timeout`], or a future returned by
-	/// [`get_persistable_update_future`] is guaranteed to be woken up.
+	/// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
 	///
-	/// Note that this method is not available with the `no-std` feature.
+	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
+	/// [`ChannelManager`] and should instead register actions to be taken later.
 	///
-	/// [`await_persistable_update`]: Self::await_persistable_update
-	/// [`await_persistable_update_timeout`]: Self::await_persistable_update_timeout
-	/// [`get_persistable_update_future`]: Self::get_persistable_update_future
-	#[cfg(any(test, feature = "std"))]
-	pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
-		self.persistence_notifier.wait_timeout(max_wait)
-	}
-
-	/// Blocks until ChannelManager needs to be persisted. Only one listener on
-	/// [`await_persistable_update`], `await_persistable_update_timeout`, or a future returned by
-	/// [`get_persistable_update_future`] is guaranteed to be woken up.
-	///
-	/// [`await_persistable_update`]: Self::await_persistable_update
-	/// [`get_persistable_update_future`]: Self::get_persistable_update_future
-	pub fn await_persistable_update(&self) {
-		self.persistence_notifier.wait()
-	}
-
-	/// Gets a [`Future`] that completes when a persistable update is available. Note that
-	/// callbacks registered on the [`Future`] MUST NOT call back into this [`ChannelManager`] and
-	/// should instead register actions to be taken later.
 	pub fn get_persistable_update_future(&self) -> Future {
 		self.persistence_notifier.get_future()
 	}
@@ -6813,6 +6824,7 @@ impl Writeable for ClaimableHTLC {
 			(0, self.prev_hop, required),
 			(1, self.total_msat, required),
 			(2, self.value, required),
+			(3, self.sender_intended_value, required),
 			(4, payment_data, option),
 			(5, self.total_value_received, option),
 			(6, self.cltv_expiry, required),
@@ -6826,6 +6838,7 @@ impl Readable for ClaimableHTLC {
 	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
 		let mut prev_hop = crate::util::ser::RequiredWrapper(None);
 		let mut value = 0;
+		let mut sender_intended_value = None;
 		let mut payment_data: Option<msgs::FinalOnionHopData> = None;
 		let mut cltv_expiry = 0;
 		let mut total_value_received = None;
@@ -6835,6 +6848,7 @@ impl Readable for ClaimableHTLC {
 			(0, prev_hop, required),
 			(1, total_msat, option),
 			(2, value, required),
+			(3, sender_intended_value, option),
 			(4, payment_data, option),
 			(5, total_value_received, option),
 			(6, cltv_expiry, required),
@@ -6864,6 +6878,7 @@ impl Readable for ClaimableHTLC {
 			prev_hop: prev_hop.0.unwrap(),
 			timer_ticks: 0,
 			value,
+			sender_intended_value: sender_intended_value.unwrap_or(value),
 			total_value_received,
 			total_msat: total_msat.unwrap(),
 			onion_payload,
@@ -6881,13 +6896,11 @@ impl Readable for HTLCSource {
 				let mut first_hop_htlc_msat: u64 = 0;
 				let mut path: Option<Vec<RouteHop>> = Some(Vec::new());
 				let mut payment_id = None;
-				let mut payment_secret = None;
 				let mut payment_params: Option<PaymentParameters> = None;
 				read_tlv_fields!(reader, {
 					(0, session_priv, required),
 					(1, payment_id, option),
 					(2, first_hop_htlc_msat, required),
-					(3, payment_secret, option),
 					(4, path, vec_type),
 					(5, payment_params, (option: ReadableArgs, 0)),
 				});
@@ -6910,7 +6923,6 @@ impl Readable for HTLCSource {
 					first_hop_htlc_msat,
 					path,
 					payment_id: payment_id.unwrap(),
-					payment_secret,
 				})
 			}
 			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
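The serialization changes above lean on LDK's TLV evenness convention: odd-numbered fields are optional to readers, even-numbered fields are required. Writing `sender_intended_value` at odd type 3 keeps new serializations readable by older versions (which simply skip the unknown odd field), and the retired type 3 in `HTLCSource` can be dropped from new writes without breaking old readers. The read side handles old data in one line, as shown in the hunk above:

    // Data written before this change has no type-3 entry, so the received
    // `value` doubles as the best available approximation of the intended amount.
    let sender_intended_value = sender_intended_value.unwrap_or(value);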
@@ -6922,14 +6934,14 @@ impl Writeable for HTLCSource {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
 		match self {
-			HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret } => {
+			HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
 				0u8.write(writer)?;
 				let payment_id_opt = Some(payment_id);
 				write_tlv_fields!(writer, {
 					(0, session_priv, required),
 					(1, payment_id_opt, option),
 					(2, first_hop_htlc_msat, required),
-					(3, payment_secret, option),
+					// 3 was previously used to write a PaymentSecret for the payment.
 					(4, *path, vec_type),
 					(5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
 				});
@@ -7336,6 +7348,7 @@ where
 		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = Vec::new();
+		let mut pending_background_events = Vec::new();
 		for _ in 0..channel_count {
 			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -7365,9 +7378,11 @@ where
 				log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
 				log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
 					log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
-				let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+				let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+				if let Some(monitor_update) = monitor_update {
+					pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+				}
 				failed_htlcs.append(&mut new_failed_htlcs);
-				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
 				channel_closures.push(events::Event::ChannelClosed {
 					channel_id: channel.channel_id(),
 					user_channel_id: channel.get_user_id(),
@@ -7432,10 +7447,13 @@ where
 			}
 		}
 
-		for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+		for (funding_txo, _) in args.channel_monitors.iter() {
 			if !funding_txo_set.contains(funding_txo) {
-				log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
-				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+				let monitor_update = ChannelMonitorUpdate {
+					update_id: CLOSED_CHANNEL_UPDATE_ID,
+					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+				};
+				pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
 			}
 		}
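Rather than broadcasting the latest holder commitment transaction directly during deserialization (the removed calls above), the reload path now queues `ChannelForceClosed` monitor updates as background events and applies them once the manager is fully constructed. Roughly, on the consuming side — a sketch assuming a `process_background_events`-style loop as in this era of LDK; the exact body is not part of this diff:

    for event in pending_background_events.drain(..) {
        match event {
            BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
                // Applying ChannelForceClosed { should_broadcast: true } makes the
                // ChannelMonitor broadcast the latest holder commitment transaction,
                // which the removed direct call used to do eagerly at read time.
                let _ = self.chain_monitor.update_channel(funding_txo, &update);
            },
        }
    }

Queuing also makes the action idempotent and persistable: `CLOSED_CHANNEL_UPDATE_ID` marks the update as terminal for the channel, and the deduplication below avoids replaying the same update twice.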
@@ -7488,10 +7506,17 @@ where
 		}
 
 		let background_event_count: u64 = Readable::read(reader)?;
-		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
 		for _ in 0..background_event_count {
 			match <u8 as Readable>::read(reader)? {
-				0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+				0 => {
+					let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+					if pending_background_events.iter().find(|e| {
+						let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+						*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+					}).is_none() {
+						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+					}
+				}
 				_ => return Err(DecodeError::InvalidValue),
 			}
 		}
@@ -7579,7 +7604,7 @@ where
 		for (_, monitor) in args.channel_monitors.iter() {
 			if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
 				for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
-					if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
+					if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
 						if path.is_empty() {
 							log_error!(args.logger, "Got an empty path for a pending payment");
 							return Err(DecodeError::InvalidValue);
 						}
@@ -7602,7 +7627,7 @@ where
 							payment_params: None,
 							session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
 							payment_hash: htlc.payment_hash,
-							payment_secret,
+							payment_secret: None, // only used for retries, and we'll never retry on startup
 							keysend_preimage: None, // only used for retries, and we'll never retry on startup
 							pending_amt_msat: path_amt,
 							pending_fee_msat: Some(path_fee),
@@ -7866,7 +7891,7 @@ where
 			per_peer_state: FairRwLock::new(per_peer_state),
 
 			pending_events: Mutex::new(pending_events_read),
-			pending_background_events: Mutex::new(pending_background_events_read),
+			pending_background_events: Mutex::new(pending_background_events),
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: Notifier::new(),
@@ -7897,6 +7922,7 @@ mod tests {
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::sha256::Hash as Sha256;
 	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+	#[cfg(feature = "std")]
 	use core::time::Duration;
 	use core::sync::atomic::Ordering;
 	use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
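The blocking `await_persistable_update*` methods are gone; `get_persistable_update_future` is now the single notification primitive. In production code the returned `Future` is awaited (it implements `core::future::Future` in this era of LDK) and persistence happens after it resolves, never from a callback registered on it. A sketch, with `persist_manager` standing in for user-supplied persistence logic and the `ChannelManager` type parameters elided:

    async fn persist_forever(channel_manager: &ChannelManager</* type params elided */>) {
        loop {
            channel_manager.get_persistable_update_future().await;
            // Persist only after the future resolves: callbacks registered on
            // the Future must not call back into the ChannelManager.
            persist_manager(channel_manager);
        }
    }

The tests below instead poll the future synchronously via `poll_is_complete()`.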
@@ -7922,9 +7948,9 @@ mod tests {
 
 		// All nodes start with a persistable update pending as `create_network` connects each node
 		// with all other nodes to make most tests simpler.
-		assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
-		assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
-		assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+		assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+		assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
 
 		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
@@ -7938,19 +7964,19 @@ mod tests {
 			&nodes[0].node.get_our_node_id()).pop().unwrap();
 
 		// The first two nodes (which opened a channel) should now require fresh persistence
-		assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
-		assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+		assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
 		// ... but the last node should not.
-		assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
 		// After persisting the first two nodes they should no longer need fresh persistence.
-		assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
-		assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+		assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
 
 		// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
 		// about the channel.
 		nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
 		nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
-		assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
 
 		// The nodes which are a party to the channel should also ignore messages from unrelated
 		// parties.
 		nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
 		nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
 		nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
 		nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
-		assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
-		assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+		assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
 
 		// At this point the channel info given by peers should still be the same.
 		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
@@ -7976,8 +8002,8 @@ mod tests {
 		// persisted and that its channel info remains the same.
 		nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
 		nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
-		assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
-		assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+		assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
 
 		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
 		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
@@ -7985,8 +8011,8 @@ mod tests {
 		// the channel info has updated.
 		nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
 		nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
-		assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
-		assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+		assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
 		assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
 		assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
 	}
@@ -8431,6 +8457,7 @@ mod tests {
 			assert_eq!(nodes_0_lock.len(), 1);
 			assert!(nodes_0_lock.contains_key(channel_id));
 		}
+		expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
 		{
 			// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
@@ -8443,6 +8470,7 @@ mod tests {
 		let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
 		nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
 		check_added_monitors!(nodes[0], 1);
+		expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
 		let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
 		let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
 		update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
@@ -8585,10 +8613,13 @@ mod tests {
 			nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
 			check_added_monitors!(nodes[1], 1);
+			expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
 			let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
 			nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
 			check_added_monitors!(nodes[0], 1);
+			expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
 		}
 
 		open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
@@ -8808,7 +8839,7 @@ pub mod bench {
 	use crate::chain::chainmonitor::{ChainMonitor, Persist};
 	use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
 	use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
-	use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
+	use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::msgs::{ChannelMessageHandler, Init};
 	use crate::routing::gossip::NetworkGraph;
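The functional tests above call `expect_channel_pending_event`, a test-utility helper, while the bench code below inlines the equivalent assertion. A sketch of what the helper presumably does — its actual body lives in `functional_test_utils` and is assumed here, mirroring the inline bench version:

    fn expect_channel_pending_event(node: &Node, expected_counterparty_node_id: &PublicKey) {
        let events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
            Event::ChannelPending { counterparty_node_id, .. } =>
                assert_eq!(counterparty_node_id, expected_counterparty_node_id),
            _ => panic!("Unexpected event"),
        }
    }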
@@ -8889,7 +8920,24 @@ pub mod bench {
 		} else { panic!(); }
 
 		node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
+		let events_b = node_b.get_and_clear_pending_events();
+		assert_eq!(events_b.len(), 1);
+		match events_b[0] {
+			Event::ChannelPending{ ref counterparty_node_id, .. } => {
+				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
+			},
+			_ => panic!("Unexpected event"),
+		}
+
 		node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
+		let events_a = node_a.get_and_clear_pending_events();
+		assert_eq!(events_a.len(), 1);
+		match events_a[0] {
+			Event::ChannelPending{ ref counterparty_node_id, .. } => {
+				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
+			},
+			_ => panic!("Unexpected event"),
+		}
 
 		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);