Merge pull request #1479 from ViktorTigerstrom/2022-05-pass-counterparty-id-to-functions
author	Arik Sosman <arik-so@users.noreply.github.com>
	Mon, 16 May 2022 19:44:16 +0000 (12:44 -0700)
committer	GitHub <noreply@github.com>
	Mon, 16 May 2022 19:44:16 +0000 (12:44 -0700)
Pass `counterparty_node_id` to `ChannelManager` functions

15 files changed:
fuzz/src/chanmon_consistency.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/mod.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/util/events.rs
lightning/src/util/fairrwlock.rs
lightning/src/util/mod.rs
lightning/src/util/test_utils.rs

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index a86f1e5e0118e38ac1fc2812b99377a2f595dda4..1b662c0e721c05b49b55bd65a35af52026988481 100644 (file)
@@ -148,7 +148,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                self.chain_monitor.update_channel(funding_txo, update)
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
 }
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index aae260e735bdbae5c0a538af8024e7e2ef2a78cb..503e6bdee0669551d1853932447a5db08fc92c17 100644 (file)
@@ -235,7 +235,7 @@ pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: De
        persister: P,
        /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
        /// from the user and not from a [`ChannelMonitor`].
-       pending_monitor_events: Mutex<Vec<MonitorEvent>>,
+       pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
        /// The best block height seen, used as a proxy for the passage of time.
        highest_chain_height: AtomicUsize,
 }
@@ -299,7 +299,7 @@ where C::Target: chain::Filter,
                                                        log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
                                                Err(ChannelMonitorUpdateErr::PermanentFailure) => {
                                                        monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                                       self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateFailed(*funding_outpoint));
+                                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)]));
                                                },
                                                Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
                                                        log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
@@ -455,10 +455,10 @@ where C::Target: chain::Filter,
                                        // UpdateCompleted event.
                                        return Ok(());
                                }
-                               self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+                               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                                        funding_txo,
                                        monitor_update_id: monitor_data.monitor.get_latest_update_id(),
-                               });
+                               }]));
                        },
                        MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
                                if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
@@ -476,10 +476,10 @@ where C::Target: chain::Filter,
        /// channel_monitor_updated once with the highest ID.
        #[cfg(any(test, fuzzing))]
        pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
-               self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted {
+               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                        funding_txo,
                        monitor_update_id,
-               });
+               }]));
        }
 
        #[cfg(any(test, fuzzing, feature = "_test_utils"))]
@@ -666,7 +666,7 @@ where C::Target: chain::Filter,
                }
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
                let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
                for monitor_state in self.monitors.read().unwrap().values() {
                        let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
@@ -692,7 +692,11 @@ where C::Target: chain::Filter,
                                        log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
                                        log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
                                }
-                               pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events());
+                               let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
+                               if monitor_events.len() > 0 {
+                                       let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+                                       pending_monitor_events.push((monitor_outpoint, monitor_events));
+                               }
                        }
                }
                pending_monitor_events
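
The practical effect of the new `release_pending_monitor_events` return type is that every batch of `MonitorEvent`s now carries the funding outpoint of the `ChannelMonitor` that produced it, from which the channel id can be recovered. A minimal consumer sketch, assuming 0.0.106-era module paths (`drain_monitor_events` and `process_event` are hypothetical names, not part of this diff):

    use lightning::chain::Watch;
    use lightning::chain::channelmonitor::MonitorEvent;
    use lightning::chain::keysinterface::Sign;

    // Application-specific handling would go here.
    fn process_event(_channel_id: [u8; 32], _event: MonitorEvent) {}

    // Drain all pending events, attributing each batch to the channel whose
    // monitor produced it.
    fn drain_monitor_events<S: Sign, W: Watch<S>>(watch: &W) {
        for (funding_outpoint, events) in watch.release_pending_monitor_events() {
            // OutPoint::to_channel_id() is the same helper this diff uses in
            // ChannelManager to pass a next_channel_id to claim_funds_internal.
            let channel_id: [u8; 32] = funding_outpoint.to_channel_id();
            for event in events {
                process_event(channel_id, event);
            }
        }
    }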
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index 24eb09e7090b212ea9867aba9844ed6993c83735..48aa712f39c1d55263f7cd5129e0b3cce3690d41 100644 (file)
@@ -302,7 +302,7 @@ pub trait Watch<ChannelSigner: Sign> {
        ///
        /// For details on asynchronous [`ChannelMonitor`] updating and returning
        /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent>;
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)>;
 }
 
 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
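
For custom `chain::Watch` implementations outside of `ChainMonitor`, the migration is mechanical: buffer events per funding outpoint and drain the buffer on release. A sketch of the shape such a store might take (`PendingEvents` is a hypothetical name; `split_off(0)` mirrors the drain pattern `ChainMonitor` uses above):

    use std::sync::Mutex;
    use lightning::chain::channelmonitor::MonitorEvent;
    use lightning::chain::transaction::OutPoint;

    // Hypothetical per-outpoint event buffer for a custom Watch implementation.
    struct PendingEvents {
        pending: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
    }

    impl PendingEvents {
        fn push(&self, funding_outpoint: OutPoint, events: Vec<MonitorEvent>) {
            // Only store non-empty batches, matching the check added to
            // ChainMonitor::release_pending_monitor_events above.
            if !events.is_empty() {
                self.pending.lock().unwrap().push((funding_outpoint, events));
            }
        }

        fn release(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
            // Drains the buffer while keeping its allocation.
            self.pending.lock().unwrap().split_off(0)
        }
    }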
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 4001f6a9299c818e9e02f7c695b138cdc50aef07..91688cc4fa3c3eed26b46b991c00dee94b2106e6 100644 (file)
@@ -1102,7 +1102,7 @@ fn test_monitor_update_fail_reestablish() {
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -2087,7 +2087,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
 
        mine_transaction(&nodes[1], &bs_txn[0]);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
@@ -2468,7 +2468,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
                assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
        }
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
        check_added_monitors!(nodes[1], 1);
 
        let mut bs_updates = None;
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 8e39e87185b8101fcf0d15da688b0381faf14959..8725059c360c47435698c71672f81f545b8bab77 100644 (file)
@@ -3958,7 +3958,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
+       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                mem::drop(channel_state_lock);
@@ -4049,12 +4049,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                } else { None };
 
                                                let mut pending_events = self.pending_events.lock().unwrap();
+                                               let prev_channel_id = Some(prev_outpoint.to_channel_id());
+                                               let next_channel_id = Some(next_channel_id);
 
-                                               let source_channel_id = Some(prev_outpoint.to_channel_id());
                                                pending_events.push(events::Event::PaymentForwarded {
-                                                       source_channel_id,
                                                        fee_earned_msat,
                                                        claim_from_onchain_tx: from_onchain,
+                                                       prev_channel_id,
+                                                       next_channel_id,
                                                });
                                        }
                                }
@@ -4513,7 +4515,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
+               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
                Ok(())
        }
 
@@ -4833,48 +4835,50 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for monitor_event in pending_monitor_events.drain(..) {
-                       match monitor_event {
-                               MonitorEvent::HTLCEvent(htlc_update) => {
-                                       if let Some(preimage) = htlc_update.payment_preimage {
-                                               log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
-                                       } else {
-                                               log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-                                       }
-                               },
-                               MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
-                               MonitorEvent::UpdateFailed(funding_outpoint) => {
-                                       let mut channel_lock = self.channel_state.lock().unwrap();
-                                       let channel_state = &mut *channel_lock;
-                                       let by_id = &mut channel_state.by_id;
-                                       let pending_msg_events = &mut channel_state.pending_msg_events;
-                                       if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
-                                               let mut chan = remove_channel!(self, channel_state, chan_entry);
-                                               failed_channels.push(chan.force_shutdown(false));
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
+               for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+                       for monitor_event in monitor_events.drain(..) {
+                               match monitor_event {
+                                       MonitorEvent::HTLCEvent(htlc_update) => {
+                                               if let Some(preimage) = htlc_update.payment_preimage {
+                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+                                               } else {
+                                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                               }
+                                       },
+                                       MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+                                       MonitorEvent::UpdateFailed(funding_outpoint) => {
+                                               let mut channel_lock = self.channel_state.lock().unwrap();
+                                               let channel_state = &mut *channel_lock;
+                                               let by_id = &mut channel_state.by_id;
+                                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                                               if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
+                                                       let mut chan = remove_channel!(self, channel_state, chan_entry);
+                                                       failed_channels.push(chan.force_shutdown(false));
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                                       let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+                                                               ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+                                                       } else {
+                                                               ClosureReason::CommitmentTxConfirmed
+                                                       };
+                                                       self.issue_channel_close_events(&chan, reason);
+                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                               node_id: chan.get_counterparty_node_id(),
+                                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                               },
                                                        });
                                                }
-                                               let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-                                                       ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-                                               } else {
-                                                       ClosureReason::CommitmentTxConfirmed
-                                               };
-                                               self.issue_channel_close_events(&chan, reason);
-                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                       node_id: chan.get_counterparty_node_id(),
-                                                       action: msgs::ErrorAction::SendErrorMessage {
-                                                               msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
-                                                       },
-                                               });
-                                       }
-                               },
-                               MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
-                                       self.channel_monitor_updated(&funding_txo, monitor_update_id);
-                               },
+                                       },
+                                       MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+                                               self.channel_monitor_updated(&funding_txo, monitor_update_id);
+                                       },
+                               }
                        }
                }
 
@@ -5704,39 +5708,21 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        let short_to_id = &mut channel_state.short_to_id;
-                       if no_connection_possible {
-                               log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
-                               channel_state.by_id.retain(|_, chan| {
-                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
+                       log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+                               log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+                       channel_state.by_id.retain(|_, chan| {
+                               if chan.get_counterparty_node_id() == *counterparty_node_id {
+                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+                                       if chan.is_shutdown() {
                                                update_maps_on_chan_removal!(self, short_to_id, chan);
-                                               failed_channels.push(chan.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                               false
+                                               return false;
                                        } else {
-                                               true
-                                       }
-                               });
-                       } else {
-                               log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
-                               channel_state.by_id.retain(|_, chan| {
-                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
-                                               chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                               if chan.is_shutdown() {
-                                                       update_maps_on_chan_removal!(self, short_to_id, chan);
-                                                       self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                                       return false;
-                                               } else {
-                                                       no_channels_remain = false;
-                                               }
+                                               no_channels_remain = false;
                                        }
-                                       true
-                               })
-                       }
+                               }
+                               true
+                       });
                        pending_msg_events.retain(|msg| {
                                match msg {
                                        &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index fed5fe289e7b2b52c56166d0fd0c518b251d0651..c0e33e5b93a51a4d620f6c19eec016a15468f70e 100644 (file)
@@ -1335,15 +1335,20 @@ macro_rules! expect_payment_path_successful {
 }
 
 macro_rules! expect_payment_forwarded {
-       ($node: expr, $source_node: expr, $expected_fee: expr, $upstream_force_closed: expr) => {
+       ($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
-                       Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+                       Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                                assert_eq!(fee_earned_msat, $expected_fee);
                                if fee_earned_msat.is_some() {
-                                       // Is the event channel_id in one of the channels between the two nodes?
-                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $source_node.node.get_our_node_id() && x.channel_id == source_channel_id.unwrap()));
+                                       // Is the event prev_channel_id in one of the channels between the two nodes?
+                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $prev_node.node.get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
+                               }
+                               // We check for force closures since a force closed channel is removed from the
+                               // node's channel list
+                               if !$downstream_force_closed {
+                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
                                }
                                assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
                        },
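
Call sites thus grow from four arguments to six: the forwarding node, the upstream (previous-hop) and downstream (next-hop) counterparties, the expected fee, and whether the upstream and downstream channels respectively were force-closed. For example, the updated call from test_monitor_update_fail_reestablish above:

    // nodes[1] forwarded a payment from nodes[0] toward nodes[2], earning
    // 1000 msat, with neither adjacent channel force-closed.
    expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);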
@@ -1587,7 +1592,7 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>,
                                {
                                        $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
                                        let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat;
-                                       expect_payment_forwarded!($node, $next_node, Some(fee as u64), false);
+                                       expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false);
                                        expected_total_fee_msat += fee as u64;
                                        check_added_monitors!($node, 1);
                                        let new_next_msgs = if $new_msgs {
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 5f24314a29f320f507084b51d22130f9c08b3b01..88fd6fedab8414f9486425bc24f3bea7034ac5e4 100644 (file)
@@ -2178,9 +2178,9 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+       nodes[1].node.force_close_channel(&chan_1.2).unwrap();
        check_added_monitors!(nodes[1], 1);
-       check_closed_broadcast!(nodes[1], false);
+       check_closed_broadcast!(nodes[1], true);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
@@ -2192,15 +2192,15 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
-       check_closed_broadcast!(nodes[1], false);
+       nodes[1].node.force_close_channel(&chan_2.2).unwrap();
+       check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
@@ -2213,7 +2213,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        macro_rules! claim_funds {
@@ -2238,9 +2238,9 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+       nodes[2].node.force_close_channel(&chan_3.2).unwrap();
        check_added_monitors!(nodes[2], 1);
-       check_closed_broadcast!(nodes[2], false);
+       check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
        {
                let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
@@ -2257,7 +2257,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
@@ -2708,18 +2708,20 @@ fn test_htlc_on_chain_success() {
        }
        let chan_id = Some(chan_1.2);
        match forwarded_events[1] {
-               Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                        assert_eq!(fee_earned_msat, Some(1000));
-                       assert_eq!(source_channel_id, chan_id);
+                       assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
+                       assert_eq!(next_channel_id, Some(chan_2.2));
                },
                _ => panic!()
        }
        match forwarded_events[2] {
-               Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                        assert_eq!(fee_earned_msat, Some(1000));
-                       assert_eq!(source_channel_id, chan_id);
+                       assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
+                       assert_eq!(next_channel_id, Some(chan_2.2));
                },
                _ => panic!()
        }
@@ -5180,10 +5182,11 @@ fn test_onchain_to_onchain_claim() {
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                        assert_eq!(fee_earned_msat, Some(1000));
-                       assert_eq!(source_channel_id, Some(chan_1.2));
+                       assert_eq!(prev_channel_id, Some(chan_1.2));
                        assert_eq!(claim_from_onchain_tx, true);
+                       assert_eq!(next_channel_id, Some(chan_2.2));
                },
                _ => panic!("Unexpected event"),
        }
@@ -5350,7 +5353,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        // Note that the fee paid is effectively double as the HTLC value (including the nodes[1] fee
        // and nodes[2] fee) is rounded down and then claimed in full.
        mine_transaction(&nodes[1], &htlc_success_txn[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(196*2), true);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196*2), true, true);
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
@@ -9016,7 +9019,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
 
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
        // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
        if !go_onchain_before_fulfill && broadcast_alice {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 336e99a5c44573ebe4d469e226a9d3b12e282af2..2d57950029e319fcabfb9532b87cda421b283dcf 100644 (file)
@@ -495,7 +495,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(bs_htlc_claim_txn.len(), 1);
        check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
-       expect_payment_forwarded!(nodes[1], nodes[0], None, false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);
 
        if !confirm_before_reload {
                mine_transaction(&nodes[0], &as_commitment_tx);
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 633876ee375124ae2f9008fe8c5f0194bb60deb3..4b777b0b99446e0d4781e6029592b60ee0e24ff7 100644 (file)
@@ -258,8 +258,13 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// descriptor.
 #[derive(Clone)]
 pub struct PeerHandleError {
-       /// Used to indicate that we probably can't make any future connections to this peer, implying
-       /// we should go ahead and force-close any channels we have with it.
+       /// Used to indicate that we probably can't make any future connections to this peer (e.g.
+       /// because we required features that our peer was missing, or vice versa).
+       ///
+       /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
+       /// any channels with this peer or check for new versions of LDK.
+       ///
+       /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        pub no_connection_possible: bool,
 }
 impl fmt::Debug for PeerHandleError {
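
A sketch of acting on that guidance, assuming a `channel_manager` reference and the offending peer's `PublicKey` (`unconnectable_peer`) are in scope; note that `force_close_channel` still takes only a channel id at this point in the series, as the updated tests in functional_tests.rs above show:

    // Since ChannelManager no longer force-closes automatically when
    // no_connection_possible is set, the application may do so itself.
    for details in channel_manager.list_channels() {
        if details.counterparty.node_id == unconnectable_peer {
            // Errors (e.g. for channels already shutting down) can be ignored.
            let _ = channel_manager.force_close_channel(&details.channel_id);
        }
    }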
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 96fda526d68d2d46369b9c9ec831d96551e86112..8a4ec2dc3518a4d9ca67ae7040c500563f7d9e3c 100644 (file)
@@ -138,7 +138,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                // ChannelManager only polls chain::Watch::release_pending_monitor_events when we
                // probe it for events, so we probe non-message events here (which should just be the
                // PaymentForwarded event).
-               expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), true);
+               expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, true);
        } else {
                // Confirm the timeout tx and check that we fail the HTLC backwards
                let block = Block {
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 8716c7322f7f02335ebc24bbd149fec64b4258e0..063fa01608679e71011dbb39d89afbdfff1a4774 100644 (file)
@@ -110,7 +110,7 @@ fn updates_shutdown_wait() {
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -279,7 +279,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], Some(1000), false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs
index dd97d964552189781b5967b5c19b6dd40f3bb87a..ef0c619b02e9f7a1b03cd6dd14b439e7799264dc 100644 (file)
@@ -100,12 +100,11 @@ pub enum ClosureReason {
                /// A developer-readable error message which we generated.
                err: String,
        },
-       /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
-       /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
-       /// peer again in the future or if the peer disconnected before we finished negotiating
-       /// the channel open. The first case may be caused by incompatible features which our
-       /// counterparty, or we, require.
-       //TODO: split between PeerUnconnectable/PeerDisconnected ?
+       /// The peer disconnected prior to funding completing. In this case the spec mandates that we
+       /// forget the channel entirely - we can attempt again if the peer reconnects.
+       ///
+       /// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
+       /// peer because of mutual incompatibility between us and our channel counterparty.
        DisconnectedPeer,
        /// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
        /// the ChannelManager deserialized.
@@ -370,9 +369,12 @@ pub enum Event {
        /// This event is generated when a payment has been successfully forwarded through us and a
        /// forwarding fee earned.
        PaymentForwarded {
-               /// The channel between the source node and us. Optional because versions prior to 0.0.107
-               /// do not serialize this field.
-               source_channel_id: Option<[u8; 32]>,
+               /// The incoming channel between the previous node and us. This is only `None` for events
+               /// generated or serialized by versions prior to 0.0.107.
+               prev_channel_id: Option<[u8; 32]>,
+               /// The outgoing channel between the next node and us. This is only `None` for events
+               /// generated or serialized by versions prior to 0.0.107.
+               next_channel_id: Option<[u8; 32]>,
                /// The fee, in milli-satoshis, which was earned as a result of the payment.
                ///
                /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
@@ -538,12 +540,13 @@ impl Writeable for Event {
                                        (0, VecWriteWrapper(outputs), required),
                                });
                        },
-                       &Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+                       &Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
                                7u8.write(writer)?;
                                write_tlv_fields!(writer, {
                                        (0, fee_earned_msat, option),
-                                       (1, source_channel_id, option),
+                                       (1, prev_channel_id, option),
                                        (2, claim_from_onchain_tx, required),
+                                       (3, next_channel_id, option),
                                });
                        },
                        &Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason } => {
@@ -703,14 +706,16 @@ impl MaybeReadable for Event {
                        7u8 => {
                                let f = || {
                                        let mut fee_earned_msat = None;
-                                       let mut source_channel_id = None;
+                                       let mut prev_channel_id = None;
                                        let mut claim_from_onchain_tx = false;
+                                       let mut next_channel_id = None;
                                        read_tlv_fields!(reader, {
                                                (0, fee_earned_msat, option),
-                                               (1, source_channel_id, option),
+                                               (1, prev_channel_id, option),
                                                (2, claim_from_onchain_tx, required),
+                                               (3, next_channel_id, option),
                                        });
-                                       Ok(Some(Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx }))
+                                       Ok(Some(Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id }))
                                };
                                f()
                        },
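
The renamed `prev_channel_id` keeps TLV type 1 and the new `next_channel_id` takes odd type 3, so per LDK's even/odd serialization convention older deserializers skip the unknown field rather than failing, and events written by older versions read back with the ids set to `None`. Consumers should therefore treat both ids as optional; a minimal handling sketch (`handle_event` is a hypothetical name):

    use lightning::util::events::Event;

    fn handle_event(event: &Event) {
        match event {
            Event::PaymentForwarded { prev_channel_id, next_channel_id, fee_earned_msat, claim_from_onchain_tx } => {
                // Both ids are None only for events generated or serialized
                // by LDK versions prior to 0.0.107.
                if let (Some(prev), Some(next)) = (prev_channel_id, next_channel_id) {
                    // Account the forward between incoming channel `prev`
                    // and outgoing channel `next`.
                    let _ = (prev, next);
                }
                let _ = (fee_earned_msat, claim_from_onchain_tx);
            },
            _ => {},
        }
    }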
diff --git a/lightning/src/util/fairrwlock.rs b/lightning/src/util/fairrwlock.rs
index 8dd74f2b53dc1697b5c8d4ad2e22eb231c84ba4e..c9b3866bec5c837e948b3a7f469515ed4b9b79f4 100644 (file)
@@ -1,4 +1,4 @@
-use std::sync::{TryLockResult, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard};
 use std::sync::atomic::{AtomicUsize, Ordering};
 
 /// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
@@ -32,10 +32,6 @@ impl<T> FairRwLock<T> {
                res
        }
 
-       pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
-               self.lock.try_write()
-       }
-
        pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
                if self.waiting_writers.load(Ordering::Relaxed) != 0 {
                        let _write_queue_lock = self.lock.write();
diff --git a/lightning/src/util/mod.rs b/lightning/src/util/mod.rs
index 0757983314e73f8698673e226a884c2752911547..b7ee02d2c1f5724ba0b0b6309ce2eb35ad321c1d 100644 (file)
@@ -25,7 +25,7 @@ pub mod persist;
 pub(crate) mod atomic_counter;
 pub(crate) mod byte_utils;
 pub(crate) mod chacha20;
-#[cfg(feature = "std")]
+#[cfg(all(not(test), feature = "std"))]
 pub(crate) mod fairrwlock;
 #[cfg(fuzzing)]
 pub mod zbase32;
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index b5953834c8cacbefd28dfc4c892bd7d700b8e94c..3682a0e8f0af02d1ac654853bff16e3096f4025a 100644 (file)
@@ -164,7 +164,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                update_res
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
 }
@@ -267,12 +267,12 @@ impl TestChannelMessageHandler {
                expected_msgs.as_mut().unwrap().push(ev);
        }
 
-       fn received_msg(&self, ev: wire::Message<()>) {
+       fn received_msg(&self, _ev: wire::Message<()>) {
                let mut msgs = self.expected_recv_msgs.lock().unwrap();
                if msgs.is_none() { return; }
                assert!(!msgs.as_ref().unwrap().is_empty(), "Received message when we weren't expecting one");
                #[cfg(test)]
-               assert_eq!(msgs.as_ref().unwrap()[0], ev);
+               assert_eq!(msgs.as_ref().unwrap()[0], _ev);
                msgs.as_mut().unwrap().remove(0);
        }
 }