Merge pull request #2847 from TheBlueMatt/2024-01-bindings-upstream
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 5 Feb 2024 23:49:19 +0000 (23:49 +0000)
committer GitHub <noreply@github.com>
Mon, 5 Feb 2024 23:49:19 +0000 (23:49 +0000)
Misc Tweaks for bindings

31 files changed:
.gitignore
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning-background-processor/src/lib.rs
lightning-block-sync/src/http.rs
lightning-persister/src/fs_store.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/mod.rs
lightning/src/chain/transaction.rs
lightning/src/events/mod.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channel_id.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/onion_message/messenger.rs
lightning/src/routing/gossip.rs
lightning/src/util/macro_logger.rs
lightning/src/util/mod.rs
lightning/src/util/persist.rs
lightning/src/util/scid_utils.rs
lightning/src/util/test_utils.rs

diff --git a/.gitignore b/.gitignore
index 7a6dc4c793032bc2c1ddf8e3e465201df6cf3543..fbeffa8a9c9f5e3fc0641149f7b2b8cd0afc2a8a 100644
@@ -13,3 +13,4 @@ lightning-rapid-gossip-sync/res/full_graph.lngossip
 lightning-custom-message/target
 lightning-transaction-sync/target
 no-std-check/target
+msrv-no-dev-deps-check/target
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index b1126433be47173fbdb6f77d664734d7160ad966..e5ee77a59539154beaee240d2d3f5ca69948e6e5 100644
@@ -40,7 +40,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget,
 use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
 use lightning::events;
 use lightning::events::MessageSendEventsProvider;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
@@ -186,7 +186,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
                self.chain_monitor.update_channel(funding_txo, update)
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
 }
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 1a520557a1a37ba311b10e5f1968890648a114f7..6b8e61dce19ddb18519a328a0bf2517f34dce727 100644
@@ -22,8 +22,7 @@ use bitcoin::consensus::encode::deserialize;
 use bitcoin::network::constants::Network;
 
 use bitcoin::hashes::hex::FromHex;
-use bitcoin::hashes::Hash as TraitImport;
-use bitcoin::hashes::HashEngine as TraitImportEngine;
+use bitcoin::hashes::Hash as _;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
@@ -646,7 +645,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                                                if let None = loss_detector.txids_confirmed.get(&funding_txid) {
                                                        let outpoint = OutPoint { txid: funding_txid, index: 0 };
                                                        for chan in channelmanager.list_channels() {
-                                                               if chan.channel_id == outpoint.to_channel_id() {
+                                                               if chan.funding_txo == Some(outpoint) {
                                                                        tx.version += 1;
                                                                        continue 'search_loop;
                                                                }
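
With `OutPoint::to_channel_id()` gone (see the lightning/src/chain/transaction.rs hunk further down), call sites that need to locate a channel by its funding transaction now compare funding outpoints directly instead of deriving a channel ID. A minimal sketch of the replacement pattern, assuming `channelmanager` and `outpoint` are in scope as in the fuzz code above:

    // Hedged sketch: ChannelDetails::funding_txo is an Option<OutPoint>, so a
    // direct comparison replaces the old channel-ID derivation.
    let conflict = channelmanager.list_channels().into_iter()
        .any(|chan| chan.funding_txo == Some(outpoint));
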
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 4bbebf46fe1fcc7ed54979d69b8f704292acd917..1300a67e2a16af0de614c7dce4ab37c6913686d7 100644
@@ -929,7 +929,7 @@ mod tests {
        use lightning::chain::transaction::OutPoint;
        use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
        use lightning::{get_event_msg, get_event};
-       use lightning::ln::PaymentHash;
+       use lightning::ln::{PaymentHash, ChannelId};
        use lightning::ln::channelmanager;
        use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
        use lightning::ln::features::{ChannelFeatures, NodeFeatures};
@@ -1415,7 +1415,7 @@ mod tests {
                }
 
                // Force-close the channel.
-               nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
 
                // Check that the force-close updates are persisted.
                check_persisted_data!(nodes[0].node, filepath.clone());
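
Where a channel ID is still needed from a funding outpoint, as in the force-close call above, it is now derived explicitly. A short sketch of the replacement pattern, assuming a funding transaction `tx` is in scope as in the test:

    use lightning::chain::transaction::OutPoint;
    use lightning::ln::ChannelId;

    // Derive the V1 channel ID explicitly rather than via the removed
    // OutPoint::to_channel_id() helper.
    let funding = OutPoint { txid: tx.txid(), index: 0 };
    let channel_id = ChannelId::v1_from_funding_outpoint(funding);
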
diff --git a/lightning-block-sync/src/http.rs b/lightning-block-sync/src/http.rs
index 58d66686f0107e311e90bb2fc373603b723806bc..aa0d840adb0a168088e6e1d08d52a6ce22f7cf44 100644
@@ -288,7 +288,7 @@ impl HttpClient {
                        HttpMessageLength::Empty => { Vec::new() },
                        HttpMessageLength::ContentLength(length) => {
                                if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
-                                       return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
+                                       return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("invalid response length: {} bytes", length)));
                                } else {
                                        let mut content = vec![0; length];
                                        #[cfg(feature = "tokio")]
@@ -727,7 +727,7 @@ pub(crate) mod client_tests {
                match client.get::<BinaryResponse>("/foo", "foo.com").await {
                        Err(e) => {
                                assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
+                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid response length: 8032001 bytes");
                        },
                        Ok(_) => panic!("Expected error"),
                }
diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs
index b5c6526207df007f10f33df3deac57629015323e..350b1cdd195636f937cd32a700da9464508fee8e 100644
@@ -450,7 +450,7 @@ mod tests {
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+               let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
 
                // Set the store's directory to read-only, which should result in
                // returning an unrecoverable failure when we then attempt to persist a
@@ -489,7 +489,7 @@ mod tests {
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+               let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
 
                // Create the store with an invalid directory name and test that the
                // channel fails to open because the directories fail to be created. There
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 14544754318ad5945622c42bd0300679aef6a6de..b71f10f58d455cc0fccff487884dd65f12c8d5cc 100644
@@ -31,6 +31,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::ln::ChannelId;
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::events;
 use crate::events::{Event, EventHandler};
@@ -158,7 +159,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
        ///
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+       fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 
        /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
        /// update.
@@ -193,7 +194,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
        /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
        ///
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+       fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 }
 
 struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
@@ -287,7 +288,7 @@ pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T:
        persister: P,
        /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
        /// from the user and not from a [`ChannelMonitor`].
-       pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+       pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
        /// The best block height seen, used as a proxy for the passage of time.
        highest_chain_height: AtomicUsize,
 
@@ -471,12 +472,15 @@ where C::Target: chain::Filter,
                }
        }
 
-       /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
+       /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
        ///
        /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
        /// monitoring for on-chain state resolutions.
-       pub fn list_monitors(&self) -> Vec<OutPoint> {
-               self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
+       pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
+               self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
+                       let channel_id = monitor_holder.monitor.channel_id();
+                       (*outpoint, channel_id)
+               }).collect()
        }
 
        #[cfg(not(c_bindings))]
@@ -542,8 +546,9 @@ where C::Target: chain::Filter,
                                        // Completed event.
                                        return Ok(());
                                }
-                               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
-                                       funding_txo,
+                               let channel_id = monitor_data.monitor.channel_id();
+                               self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+                                       funding_txo, channel_id,
                                        monitor_update_id: monitor_data.monitor.get_latest_update_id(),
                                }], monitor_data.monitor.get_counterparty_node_id()));
                        },
@@ -565,9 +570,14 @@ where C::Target: chain::Filter,
        #[cfg(any(test, fuzzing))]
        pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
                let monitors = self.monitors.read().unwrap();
-               let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
-               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
+               let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
+                       (m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
+               } else {
+                       (None, ChannelId::v1_from_funding_outpoint(funding_txo))
+               };
+               self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
                        funding_txo,
+                       channel_id,
                        monitor_update_id,
                }], counterparty_node_id));
                self.event_notifier.notify();
@@ -753,11 +763,14 @@ where C::Target: chain::Filter,
        }
 
        fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
+               // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
+               // versions are V1-established. For 0.0.121+ the `channel_id` field is always `Some`.
+               let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
                // Update the monitor that watches the channel referred to by the given outpoint.
                let monitors = self.monitors.read().unwrap();
                match monitors.get(&funding_txo) {
                        None => {
-                               let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id()));
+                               let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id));
                                log_error!(logger, "Failed to update channel monitor: no such monitor registered");
 
                                // We should never ever trigger this from within ChannelManager. Technically a
@@ -815,7 +828,7 @@ where C::Target: chain::Filter,
                }
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
                let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
                for monitor_state in self.monitors.read().unwrap().values() {
                        let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
@@ -829,8 +842,9 @@ where C::Target: chain::Filter,
                                let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
                                if monitor_events.len() > 0 {
                                        let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+                                       let monitor_channel_id = monitor_state.monitor.channel_id();
                                        let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
-                                       pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
+                                       pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
                                }
                        }
                }
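
For consumers, the visible change here is that `list_monitors` and `release_pending_monitor_events` now carry the `ChannelId` alongside the funding outpoint. A hedged usage sketch, assuming `chain_monitor` is an initialized `ChainMonitor`:

    // list_monitors() now yields (funding outpoint, channel ID) pairs rather
    // than bare outpoints; both ChannelId and Txid implement Display.
    for (funding_txo, channel_id) in chain_monitor.list_monitors() {
        println!("watching channel {} funded at {}:{}",
            channel_id, funding_txo.txid, funding_txo.index);
    }
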
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index c81a48b78ac8b4aa6fba1facae191f7190e27d51..e65b54f57013c58df60414c577e518254f17241b 100644
@@ -96,6 +96,11 @@ pub struct ChannelMonitorUpdate {
        ///
        /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
        pub update_id: u64,
+       /// The channel ID associated with these updates.
+       ///
+       /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and
+       /// always `Some` otherwise.
+       pub channel_id: Option<ChannelId>,
 }
 
 /// The update ID used for a [`ChannelMonitorUpdate`] that is either:
@@ -118,6 +123,7 @@ impl Writeable for ChannelMonitorUpdate {
                }
                write_tlv_fields!(w, {
                        (1, self.counterparty_node_id, option),
+                       (3, self.channel_id, option),
                });
                Ok(())
        }
@@ -134,10 +140,12 @@ impl Readable for ChannelMonitorUpdate {
                        }
                }
                let mut counterparty_node_id = None;
+               let mut channel_id = None;
                read_tlv_fields!(r, {
                        (1, counterparty_node_id, option),
+                       (3, channel_id, option),
                });
-               Ok(Self { update_id, counterparty_node_id, updates })
+               Ok(Self { update_id, counterparty_node_id, updates, channel_id })
        }
 }
 
@@ -158,6 +166,8 @@ pub enum MonitorEvent {
        Completed {
                /// The funding outpoint of the [`ChannelMonitor`] that was updated
                funding_txo: OutPoint,
+               /// The channel ID of the channel associated with the [`ChannelMonitor`]
+               channel_id: ChannelId,
                /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
                /// [`ChannelMonitor::get_latest_update_id`].
                ///
@@ -172,6 +182,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
        (0, Completed) => {
                (0, funding_txo, required),
                (2, monitor_update_id, required),
+               (4, channel_id, required),
        },
 ;
        (2, HTLCEvent),
@@ -772,6 +783,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
 
        channel_keys_id: [u8; 32],
        holder_revocation_basepoint: RevocationBasepoint,
+       channel_id: ChannelId,
        funding_info: (OutPoint, ScriptBuf),
        current_counterparty_commitment_txid: Option<Txid>,
        prev_counterparty_commitment_txid: Option<Txid>,
@@ -1097,6 +1109,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signe
                        (13, self.spendable_txids_confirmed, required_vec),
                        (15, self.counterparty_fulfilled_htlcs, required),
                        (17, self.initial_counterparty_commitment_info, option),
+                       (19, self.channel_id, required),
                });
 
                Ok(())
@@ -1160,7 +1173,7 @@ impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
 
        pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
                let peer_id = monitor_impl.counterparty_node_id;
-               let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
+               let channel_id = Some(monitor_impl.channel_id());
                WithChannelMonitor {
                        logger, peer_id, channel_id,
                }
@@ -1181,7 +1194,8 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                          funding_redeemscript: ScriptBuf, channel_value_satoshis: u64,
                          commitment_transaction_number_obscure_factor: u64,
                          initial_holder_commitment_tx: HolderCommitmentTransaction,
-                         best_block: BestBlock, counterparty_node_id: PublicKey) -> ChannelMonitor<Signer> {
+                         best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId,
+       ) -> ChannelMonitor<Signer> {
 
                assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
                let counterparty_payment_script = chan_utils::get_counterparty_payment_script(
@@ -1235,6 +1249,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 
                        channel_keys_id,
                        holder_revocation_basepoint,
+                       channel_id,
                        funding_info,
                        current_counterparty_commitment_txid: None,
                        prev_counterparty_commitment_txid: None,
@@ -1386,6 +1401,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                self.inner.lock().unwrap().get_funding_txo().clone()
        }
 
+       /// Gets the channel_id of the channel this ChannelMonitor is monitoring.
+       pub fn channel_id(&self) -> ChannelId {
+               self.inner.lock().unwrap().channel_id()
+       }
+
        /// Gets a list of txids, with their output scripts (in the order they appear in the
        /// transaction), which we must learn about spends of via block_connected().
        pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
@@ -2834,7 +2854,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
                                        } else if !self.holder_tx_signed {
                                                log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
-                                               log_error!(logger, "    in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
+                                               log_error!(logger, "    in channel monitor for channel {}!", &self.channel_id());
                                                log_error!(logger, "    Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
                                        } else {
                                                // If we generated a MonitorEvent::HolderForceClosed, the ChannelManager
@@ -2880,6 +2900,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                &self.funding_info
        }
 
+       pub fn channel_id(&self) -> ChannelId {
+               self.channel_id
+       }
+
        fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
                // If we've detected a counterparty commitment tx on chain, we must include it in the set
                // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
@@ -3642,7 +3666,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
                                        let mut balance_spendable_csv = None;
                                        log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
-                                               &self.funding_info.0.to_channel_id(), txid);
+                                               &self.channel_id(), txid);
                                        self.funding_spend_seen = true;
                                        let mut commitment_tx_to_counterparty_output = None;
                                        if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 {
@@ -3812,7 +3836,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                        log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
                                        self.pending_events.push(Event::SpendableOutputs {
                                                outputs: vec![descriptor],
-                                               channel_id: Some(self.funding_info.0.to_channel_id()),
+                                               channel_id: Some(self.channel_id()),
                                        });
                                        self.spendable_txids_confirmed.push(entry.txid);
                                },
@@ -4557,6 +4581,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
                let mut spendable_txids_confirmed = Some(Vec::new());
                let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
                let mut initial_counterparty_commitment_info = None;
+               let mut channel_id = None;
                read_tlv_fields!(reader, {
                        (1, funding_spend_confirmed, option),
                        (3, htlcs_resolved_on_chain, optional_vec),
@@ -4567,6 +4592,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
                        (13, spendable_txids_confirmed, optional_vec),
                        (15, counterparty_fulfilled_htlcs, option),
                        (17, initial_counterparty_commitment_info, option),
+                       (19, channel_id, option),
                });
 
                // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
@@ -4591,6 +4617,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 
                        channel_keys_id,
                        holder_revocation_basepoint,
+                       channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)),
                        funding_info,
                        current_counterparty_commitment_txid,
                        prev_counterparty_commitment_txid,
@@ -4665,7 +4692,7 @@ mod tests {
        use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
        use crate::chain::transaction::OutPoint;
        use crate::sign::InMemorySigner;
-       use crate::ln::{PaymentPreimage, PaymentHash};
+       use crate::ln::{PaymentPreimage, PaymentHash, ChannelId};
        use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
        use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
        use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
@@ -4841,6 +4868,7 @@ mod tests {
                        htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap()))
                };
                let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+               let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
                let channel_parameters = ChannelTransactionParameters {
                        holder_pubkeys: keys.holder_channel_pubkeys.clone(),
                        holder_selected_contest_delay: 66,
@@ -4860,7 +4888,7 @@ mod tests {
                        Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
                        (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
                        &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
-                       best_block, dummy_key);
+                       best_block, dummy_key, channel_id);
 
                let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
                let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);
@@ -5090,6 +5118,7 @@ mod tests {
                        htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
                };
                let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+               let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
                let channel_parameters = ChannelTransactionParameters {
                        holder_pubkeys: keys.holder_channel_pubkeys.clone(),
                        holder_selected_contest_delay: 66,
@@ -5107,9 +5136,9 @@ mod tests {
                        Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
                        (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
                        &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
-                       best_block, dummy_key);
+                       best_block, dummy_key, channel_id);
 
-               let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone();
+               let chan_id = monitor.inner.lock().unwrap().channel_id();
                let context_logger = WithChannelMonitor::from(&logger, &monitor);
                log_error!(context_logger, "This is an error");
                log_warn!(context_logger, "This is an error");
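
The new `channel_id` rides the monitor's existing TLV stream under an odd type number (19), which pre-0.0.121 readers silently skip and which newer readers treat as optional, falling back to the V1 derivation when absent. A condensed sketch of that upgrade idiom, using the same macros as the hunks above:

    // Condensed sketch of the odd-TLV upgrade pattern: odd type numbers are
    // optional-to-understand in LDK's TLV scheme, so old readers skip type 19.
    write_tlv_fields!(w, {
        (19, self.channel_id, required),
    });

    // On read, tolerate absence and reconstruct from the funding outpoint:
    let mut channel_id = None;
    read_tlv_fields!(reader, {
        (19, channel_id, option),
    });
    let channel_id = channel_id
        .unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint));
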
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index dafce03ddb0d76ca1ca7f828f71b59c557345bab..368dd8497b037e4821c0c41b0de7f3e7585d8042 100644
@@ -17,6 +17,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::PublicKey;
 
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
+use crate::ln::ChannelId;
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::chain::transaction::{OutPoint, TransactionData};
 
@@ -297,7 +298,7 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> {
        ///
        /// For details on asynchronous [`ChannelMonitor`] updating and returning
        /// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>;
 }
 
 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
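
Implementors of `Watch` outside LDK (such as the fuzz harness's `TestChainMonitor` earlier in this diff) adapt by widening the returned tuple. A pass-through sketch, assuming an inner `ChainMonitor` field:

    // Sketch of a delegating Watch implementor after the signature change;
    // self.chain_monitor is assumed to be an inner ChainMonitor.
    fn release_pending_monitor_events(&self)
        -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>
    {
        self.chain_monitor.release_pending_monitor_events()
    }
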
diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs
index 5bef97792d3944252837f67d8e7ebefe8a797e57..17815207a8dcf92355dc762cdfd0e66afc0c33f5 100644
@@ -9,9 +9,7 @@
 
 //! Types describing on-chain transactions.
 
-use crate::ln::ChannelId;
 use bitcoin::hash_types::Txid;
-use bitcoin::hashes::Hash;
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::transaction::Transaction;
 
@@ -58,11 +56,6 @@ pub struct OutPoint {
 }
 
 impl OutPoint {
-       /// Convert an `OutPoint` to a lightning channel id.
-       pub fn to_channel_id(&self) -> ChannelId {
-               ChannelId::v1_from_funding_txid(self.txid.as_byte_array(), self.index)
-       }
-
        /// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
        ///
        /// This is not exported to bindings users as the same type is used universally in the C bindings 
@@ -86,6 +79,7 @@ impl_writeable!(OutPoint, { txid, index });
 #[cfg(test)]
 mod tests {
        use crate::chain::transaction::OutPoint;
+       use crate::ln::ChannelId;
 
        use bitcoin::blockdata::transaction::Transaction;
        use bitcoin::consensus::encode;
@@ -94,13 +88,13 @@ mod tests {
        #[test]
        fn test_channel_id_calculation() {
                let tx: Transaction = encode::deserialize(&<Vec<u8>>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap();
-               assert_eq!(&OutPoint {
+               assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
                        txid: tx.txid(),
                        index: 0
-               }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
-               assert_eq!(&OutPoint {
+               }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
+               assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
                        txid: tx.txid(),
                        index: 1
-               }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
+               }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
        }
 }
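
For reference, the derivation the updated test exercises is the BOLT 2 rule for V1 channels: XOR the big-endian funding output index into the last two bytes of the funding txid's internal byte representation. A standalone sketch, independent of LDK types:

    // Standalone sketch of the v1 channel_id rule. Note the test vectors
    // above: indexes 0 and 1 differ only in the final byte (...5e vs ...5f).
    fn v1_channel_id(txid_bytes: [u8; 32], funding_output_index: u16) -> [u8; 32] {
        let mut id = txid_bytes;
        id[30] ^= (funding_output_index >> 8) as u8;
        id[31] ^= (funding_output_index & 0xff) as u8;
        id
    }
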
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 77f6937c54558aae85d10a3c1c8cc9c05bc5d424..3bc70014f49aab63105ae94409e94025677ff8ab 100644
@@ -184,8 +184,15 @@ pub enum ClosureReason {
        HolderForceClosed,
        /// The channel was closed after negotiating a cooperative close and we've now broadcasted
        /// the cooperative close transaction. Note the shutdown may have been initiated by us.
-       //TODO: split between CounterpartyInitiated/LocallyInitiated
-       CooperativeClosure,
+       // Can be removed once we disallow downgrading to 0.0.121
+       LegacyCooperativeClosure,
+       /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+       /// the cooperative close transaction. This indicates that the shutdown was initiated by our
+       /// counterparty.
+       CounterpartyInitiatedCooperativeClosure,
+       /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+       /// the cooperative close transaction. This indicates that the shutdown was initiated by us.
+       LocallyInitiatedCooperativeClosure,
        /// A commitment transaction was confirmed on chain, closing the channel. Most likely this
        /// commitment transaction came from our counterparty, but it may also have come from
        /// a copy of our own `ChannelMonitor`.
@@ -230,7 +237,9 @@ impl core::fmt::Display for ClosureReason {
                                f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
                        },
                        ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
-                       ClosureReason::CooperativeClosure => f.write_str("the channel was cooperatively closed"),
+                       ClosureReason::LegacyCooperativeClosure => f.write_str("the channel was cooperatively closed"),
+                       ClosureReason::CounterpartyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by our peer"),
+                       ClosureReason::LocallyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by us"),
                        ClosureReason::CommitmentTxConfirmed => f.write_str("commitment or closing transaction was confirmed on chain."),
                        ClosureReason::FundingTimedOut => write!(f, "funding transaction failed to confirm within {} blocks", FUNDING_CONF_DEADLINE_BLOCKS),
                        ClosureReason::ProcessingError { err } => {
@@ -250,12 +259,14 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
        (1, FundingTimedOut) => {},
        (2, HolderForceClosed) => {},
        (6, CommitmentTxConfirmed) => {},
-       (4, CooperativeClosure) => {},
+       (4, LegacyCooperativeClosure) => {},
        (8, ProcessingError) => { (1, err, required) },
        (10, DisconnectedPeer) => {},
        (12, OutdatedChannelManager) => {},
        (13, CounterpartyCoopClosedUnfundedChannel) => {},
-       (15, FundingBatchClosure) => {}
+       (15, FundingBatchClosure) => {},
+       (17, CounterpartyInitiatedCooperativeClosure) => {},
+       (19, LocallyInitiatedCooperativeClosure) => {},
 );
 
 /// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
@@ -783,7 +794,7 @@ pub enum Event {
                /// The outgoing channel between the next node and us. This is only `None` for events
                /// generated or serialized by versions prior to 0.0.107.
                next_channel_id: Option<ChannelId>,
-               /// The fee, in milli-satoshis, which was earned as a result of the payment.
+               /// The total fee, in milli-satoshis, which was earned as a result of the payment.
                ///
                /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
                /// was pending, the amount the next hop claimed will have been rounded down to the nearest
@@ -794,15 +805,29 @@ pub enum Event {
                /// If the channel which sent us the payment has been force-closed, we will claim the funds
                /// via an on-chain transaction. In that case we do not yet know the on-chain transaction
                /// fees which we will spend and will instead set this to `None`. It is possible duplicate
-               /// `PaymentForwarded` events are generated for the same payment iff `fee_earned_msat` is
+               /// `PaymentForwarded` events are generated for the same payment iff `total_fee_earned_msat` is
                /// `None`.
-               fee_earned_msat: Option<u64>,
+               total_fee_earned_msat: Option<u64>,
+               /// The share of the total fee, in milli-satoshis, which was withheld in addition to the
+               /// forwarding fee.
+               ///
+               /// This will only be `Some` if we forwarded an intercepted HTLC with less than the
+               /// expected amount. This means our counterparty agreed to receive less than the invoice
+               /// amount, e.g., by claiming the payment featuring a corresponding
+               /// [`PaymentClaimable::counterparty_skimmed_fee_msat`].
+               ///
+               /// Will also always be `None` for events serialized with LDK prior to version 0.0.122.
+               ///
+               /// The caveat described above the `total_fee_earned_msat` field applies here as well.
+               ///
+               /// [`PaymentClaimable::counterparty_skimmed_fee_msat`]: Self::PaymentClaimable::counterparty_skimmed_fee_msat
+               skimmed_fee_msat: Option<u64>,
                /// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
                /// transaction.
                claim_from_onchain_tx: bool,
                /// The final amount forwarded, in milli-satoshis, after the fee is deducted.
                ///
-               /// The caveat described above the `fee_earned_msat` field applies here as well.
+               /// The caveat described above the `total_fee_earned_msat` field applies here as well.
                outbound_amount_forwarded_msat: Option<u64>,
        },
        /// Used to indicate that a channel with the given `channel_id` is being opened and pending
@@ -831,6 +856,10 @@ pub enum Event {
                counterparty_node_id: PublicKey,
                /// The outpoint of the channel's funding transaction.
                funding_txo: OutPoint,
+               /// The features that this channel will operate with.
+               ///
+               /// Will be `None` for channels created prior to LDK version 0.0.122.
+               channel_type: Option<ChannelTypeFeatures>,
        },
        /// Used to indicate that a channel with the given `channel_id` is ready to
        /// be used. This event is emitted either when the funding transaction has been confirmed
@@ -1083,16 +1112,17 @@ impl Writeable for Event {
                                });
                        }
                        &Event::PaymentForwarded {
-                               fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
-                               next_channel_id, outbound_amount_forwarded_msat
+                               total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+                               next_channel_id, outbound_amount_forwarded_msat, skimmed_fee_msat,
                        } => {
                                7u8.write(writer)?;
                                write_tlv_fields!(writer, {
-                                       (0, fee_earned_msat, option),
+                                       (0, total_fee_earned_msat, option),
                                        (1, prev_channel_id, option),
                                        (2, claim_from_onchain_tx, required),
                                        (3, next_channel_id, option),
                                        (5, outbound_amount_forwarded_msat, option),
+                                       (7, skimmed_fee_msat, option),
                                });
                        },
                        &Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason,
@@ -1199,10 +1229,14 @@ impl Writeable for Event {
                                        (6, channel_type, required),
                                });
                        },
-                       &Event::ChannelPending { ref channel_id, ref user_channel_id, ref former_temporary_channel_id, ref counterparty_node_id, ref funding_txo } => {
+                       &Event::ChannelPending { ref channel_id, ref user_channel_id,
+                               ref former_temporary_channel_id, ref counterparty_node_id, ref funding_txo,
+                               ref channel_type
+                       } => {
                                31u8.write(writer)?;
                                write_tlv_fields!(writer, {
                                        (0, channel_id, required),
+                                       (1, channel_type, option),
                                        (2, user_channel_id, required),
                                        (4, former_temporary_channel_id, required),
                                        (6, counterparty_node_id, required),
@@ -1384,21 +1418,23 @@ impl MaybeReadable for Event {
                        },
                        7u8 => {
                                let f = || {
-                                       let mut fee_earned_msat = None;
+                                       let mut total_fee_earned_msat = None;
                                        let mut prev_channel_id = None;
                                        let mut claim_from_onchain_tx = false;
                                        let mut next_channel_id = None;
                                        let mut outbound_amount_forwarded_msat = None;
+                                       let mut skimmed_fee_msat = None;
                                        read_tlv_fields!(reader, {
-                                               (0, fee_earned_msat, option),
+                                               (0, total_fee_earned_msat, option),
                                                (1, prev_channel_id, option),
                                                (2, claim_from_onchain_tx, required),
                                                (3, next_channel_id, option),
                                                (5, outbound_amount_forwarded_msat, option),
+                                               (7, skimmed_fee_msat, option),
                                        });
                                        Ok(Some(Event::PaymentForwarded {
-                                               fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
-                                               outbound_amount_forwarded_msat
+                                               total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
+                                               outbound_amount_forwarded_msat, skimmed_fee_msat,
                                        }))
                                };
                                f()
@@ -1589,8 +1625,10 @@ impl MaybeReadable for Event {
                                        let mut former_temporary_channel_id = None;
                                        let mut counterparty_node_id = RequiredWrapper(None);
                                        let mut funding_txo = RequiredWrapper(None);
+                                       let mut channel_type = None;
                                        read_tlv_fields!(reader, {
                                                (0, channel_id, required),
+                                               (1, channel_type, option),
                                                (2, user_channel_id, required),
                                                (4, former_temporary_channel_id, required),
                                                (6, counterparty_node_id, required),
@@ -1602,7 +1640,8 @@ impl MaybeReadable for Event {
                                                user_channel_id,
                                                former_temporary_channel_id,
                                                counterparty_node_id: counterparty_node_id.0.unwrap(),
-                                               funding_txo: funding_txo.0.unwrap()
+                                               funding_txo: funding_txo.0.unwrap(),
+                                               channel_type,
                                        }))
                                };
                                f()
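
Event consumers pick up the rename (`fee_earned_msat` to `total_fee_earned_msat`) plus the new optional fields by adjusting their match arms. A hedged handler sketch:

    // Sketch of an event handler over the renamed/expanded variants; the `..`
    // rest-patterns keep it tolerant of further field additions.
    match event {
        Event::PaymentForwarded {
            total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, ..
        } => {
            // total_fee_earned_msat is None when the fee is unknowable because
            // the inbound channel was force-closed (see the docs above).
            println!("forwarded: fee {:?} msat (skimmed {:?}, on-chain: {})",
                total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx);
        },
        Event::ChannelPending { channel_id, channel_type, .. } => {
            // channel_type is None for events from LDK versions before 0.0.122.
            println!("channel {} pending (type: {:?})", channel_id, channel_type);
        },
        _ => {},
    }
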
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index af827b8cebbc3a60cfe2a46ba02ad37cc0d59abf..4d9316db79f8f9157aff3989001a116283e23d82 100644
@@ -21,7 +21,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
 use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::errors::APIError;
@@ -1861,7 +1861,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-       let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
 
@@ -1955,8 +1955,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
        close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2634,8 +2634,8 @@ fn test_temporary_error_during_shutdown() {
        assert_eq!(txn_a, txn_b);
        assert_eq!(txn_a.len(), 1);
        check_spends!(txn_a[0], funding_tx);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -3404,7 +3404,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
        let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
        let mut events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
-       expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), close_during_reload, false);
+       expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000),
+               None, close_during_reload, false);
        if close_during_reload {
                match events[0] {
                        Event::ChannelClosed { .. } => {},
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index ec4f26664a76b053bf7313695e686bdfe7b37c22..f9c512d7f84cce37f87363680212e920791f41ae 100644
@@ -267,7 +267,7 @@ enum HTLCUpdateAwaitingACK {
 }
 
 macro_rules! define_state_flags {
-       ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
+       ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
                #[doc = $flag_type_doc]
                #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
                struct $flag_type(u32);
@@ -296,15 +296,18 @@ macro_rules! define_state_flags {
 
                        #[allow(unused)]
                        fn is_empty(&self) -> bool { self.0 == 0 }
-
                        #[allow(unused)]
                        fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+                       #[allow(unused)]
+                       fn set(&mut self, flag: Self) { *self |= flag }
+                       #[allow(unused)]
+                       fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
                }
 
-               impl core::ops::Not for $flag_type {
-                       type Output = Self;
-                       fn not(self) -> Self::Output { Self(!self.0) }
-               }
+               $(
+                       define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
+               )*
+
                impl core::ops::BitOr for $flag_type {
                        type Output = Self;
                        fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
@@ -323,8 +326,28 @@ macro_rules! define_state_flags {
        ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
                define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
        };
+       ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
+               impl $flag_type {
+                       #[allow(unused)]
+                       fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
+                       #[allow(unused)]
+                       fn $set(&mut self) { self.set($flag_type::new() | $flag) }
+                       #[allow(unused)]
+                       fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
+               }
+       };
        ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
                define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+
+               define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
+                       is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
+               define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
+                       is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
+               define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+                       is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
+               define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
+                       is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);
+
                impl core::ops::BitOr<FundedStateFlags> for $flag_type {
                        type Output = Self;
                        fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
@@ -371,15 +394,19 @@ define_state_flags!(
        "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
        FundedStateFlags, [
                ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
-                       until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
+                       until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
+                       is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
                ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
                        somewhere and we should pause sending any outbound messages until they've managed to \
-                       complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
+                       complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
+                       is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
                ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
                        any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
-                       message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
+                       message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
+                       is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
                ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
-                       the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
+                       the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
+                       is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
        ]
 );
 
@@ -387,9 +414,9 @@ define_state_flags!(
        "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
        NegotiatingFundingFlags, [
                ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
-                       OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
+                       OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
                ("Indicates we have received their `open_channel`/`accept_channel` message.",
-                       THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
+                       THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
        ]
 );
 
@@ -398,13 +425,16 @@ define_state_flags!(
        FUNDED_STATE, AwaitingChannelReadyFlags, [
                ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
                        `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
-                       THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
+                       THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
+                       is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
                ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
                        `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
-                       OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
+                       OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
+                       is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
                ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
                        is being held until all channels in the batch have received `funding_signed` and have \
-                       their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
+                       their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
+                       is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
        ]
 );
 
@@ -415,10 +445,13 @@ define_state_flags!(
                        `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
                        messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
                        implicit ACK, so instead we have to hold them away temporarily to be sent later.",
-                       AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
+                       AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
+                       is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
        ]
 );
 
+// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
+// into account when introducing new states, and update `test_channel_state_order` accordingly.
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
 enum ChannelState {
        /// We are negotiating the parameters required for the channel prior to funding it.
@@ -439,12 +472,12 @@ enum ChannelState {
 }
 
 macro_rules! impl_state_flag {
-       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
+       ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
                #[allow(unused)]
                fn $get(&self) -> bool {
                        match self {
                                $(
-                                       ChannelState::$state(flags) => flags.is_set($state_flag.into()),
+                                       ChannelState::$state(flags) => flags.$get(),
                                )*
                                _ => false,
                        }
@@ -453,7 +486,7 @@ macro_rules! impl_state_flag {
                fn $set(&mut self) {
                        match self {
                                $(
-                                       ChannelState::$state(flags) => *flags |= $state_flag,
+                                       ChannelState::$state(flags) => flags.$set(),
                                )*
                                _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
                        }
@@ -462,17 +495,17 @@ macro_rules! impl_state_flag {
                fn $clear(&mut self) {
                        match self {
                                $(
-                                       ChannelState::$state(flags) => *flags &= !($state_flag),
+                                       ChannelState::$state(flags) => { let _ = flags.$clear(); },
                                )*
                                _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
                        }
                }
        };
-       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
-               impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
+       ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
+               impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
        };
-       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
-               impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
+       ($get: ident, $set: ident, $clear: ident, $state: ident) => {
+               impl_state_flag!($get, $set, $clear, [$state]);
        };
 }
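
With the `$state_flag` parameter gone, the macro now delegates to the per-flag helpers generated by `define_state_flags!` above. An invocation such as `impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);` expands to roughly the following getter (the setter and clearer follow the same pattern; a sketch only):

	fn is_peer_disconnected(&self) -> bool {
		match self {
			// FUNDED_STATES covers exactly these two variants.
			ChannelState::AwaitingChannelReady(flags) => flags.is_peer_disconnected(),
			ChannelState::ChannelReady(flags) => flags.is_peer_disconnected(),
			_ => false,
		}
	}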
 
@@ -523,35 +556,27 @@ impl ChannelState {
                }
        }
 
-       fn should_force_holding_cell(&self) -> bool {
+       fn can_generate_new_commitment(&self) -> bool {
                match self {
                        ChannelState::ChannelReady(flags) =>
-                               flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
-                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
-                                       flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+                               !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
+                                       !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
+                                       !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
                        _ => {
-                               debug_assert!(false, "The holding cell is only valid within ChannelReady");
+                               debug_assert!(false, "Can only generate new commitment within ChannelReady");
                                false
                        },
                }
        }
 
-       impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
-               FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
-       impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
-               FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
-       impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
-               FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
-       impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
-               FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
-       impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
-               AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
-       impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
-               AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
-       impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
-               AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
-       impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
-               ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
+       impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
+       impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
+       impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
+       impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
+       impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
+       impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
+       impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
+       impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
 }
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
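
Note the polarity flip above: `should_force_holding_cell` returned true when any of `AWAITING_REMOTE_REVOKE`, `MONITOR_UPDATE_IN_PROGRESS`, or `PEER_DISCONNECTED` was set, so by De Morgan's law `can_generate_new_commitment` is its exact negation within `ChannelReady`. Call sites invert accordingly, as later in this diff:

	if !self.context.channel_state.can_generate_new_commitment() {
		// Same condition the old `should_force_holding_cell()` expressed:
		// queue the update in the holding cell rather than building a new
		// commitment transaction now.
	}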
@@ -816,7 +841,7 @@ pub(super) struct ReestablishResponses {
 pub(crate) struct ShutdownResult {
        pub(crate) closure_reason: ClosureReason,
        /// A channel monitor update to apply.
-       pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
+       pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
        /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
        pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
        /// An unbroadcasted batch funding transaction id. The closure of this channel should be
@@ -1231,6 +1256,9 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        // We track whether we already emitted a `ChannelReady` event.
        channel_ready_event_emitted: bool,
 
+	/// `Some(())` if we initiated the shutdown of this channel, `None` otherwise.
+       local_initiated_shutdown: Option<()>,
+
        /// The unique identifier used to re-derive the private key material for the channel through
        /// [`SignerProvider::derive_channel_signer`].
        channel_keys_id: [u8; 32],
@@ -2388,16 +2416,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        // funding transaction, don't return a funding txo (which prevents providing the
                        // monitor update to the user, even if we return one).
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-                       let generate_monitor_update = match self.channel_state {
-                               ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
-                               _ => false,
-                       };
-                       if generate_monitor_update {
+                       if !self.channel_state.is_pre_funded_state() {
                                self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-                               Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+                               Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
                                        counterparty_node_id: Some(self.counterparty_node_id),
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+                                       channel_id: Some(self.channel_id()),
                                }))
                        } else { None }
                } else { None };
@@ -2709,7 +2734,7 @@ impl<SP: Deref> Channel<SP> where
        where L::Target: Logger {
                // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
                // (see equivalent if condition there).
-               assert!(self.context.channel_state.should_force_holding_cell());
+               assert!(!self.context.channel_state.can_generate_new_commitment());
                let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
                let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
                self.context.latest_monitor_update_id = mon_update_id;
@@ -2777,9 +2802,10 @@ impl<SP: Deref> Channel<SP> where
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage: payment_preimage_arg.clone(),
                        }],
+                       channel_id: Some(self.context.channel_id()),
                };
 
-               if self.context.channel_state.should_force_holding_cell() {
+               if !self.context.channel_state.can_generate_new_commitment() {
                        // Note that this condition is the same as the assertion in
                        // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
                        // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
@@ -2953,7 +2979,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(None);
                }
 
-               if self.context.channel_state.should_force_holding_cell() {
+               if !self.context.channel_state.can_generate_new_commitment() {
                        debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
                        force_holding_cell = true;
                }
@@ -3049,12 +3075,12 @@ impl<SP: Deref> Channel<SP> where
                let mut check_reconnection = false;
                match &self.context.channel_state {
                        ChannelState::AwaitingChannelReady(flags) => {
-                               let flags = *flags & !FundedStateFlags::ALL;
+                               let flags = flags.clone().clear(FundedStateFlags::ALL.into());
                                debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
-                               if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
+                               if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
                                        // If we reconnected before sending our `channel_ready` they may still resend theirs.
                                        check_reconnection = true;
-                               } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+                               } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
                                        self.context.channel_state.set_their_channel_ready();
                                } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
                                        self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
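
The `flags.clone().clear(mask)` pattern above replaces the previous `flags & !mask` bit-twiddling. Per the macro arms earlier in this diff, `clear` takes `&mut self` and returns the resulting flags, so cloning first gives a non-mutating way to mask flags off, e.g. (sketch):

	// Equivalent to the old `*flags & !FundedStateFlags::ALL`:
	let masked = flags.clone().clear(FundedStateFlags::ALL.into());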
@@ -3304,7 +3330,7 @@ impl<SP: Deref> Channel<SP> where
                Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
        }
 
-       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
+       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
@@ -3312,7 +3338,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
 
-               self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+               self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
        }
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
@@ -3515,7 +3541,8 @@ impl<SP: Deref> Channel<SP> where
                                htlc_outputs: htlcs_and_sigs,
                                claimed_htlcs,
                                nondust_htlc_sources,
-                       }]
+                       }],
+                       channel_id: Some(self.context.channel_id()),
                };
 
                self.context.cur_holder_commitment_transaction_number -= 1;
@@ -3570,7 +3597,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
@@ -3591,6 +3618,7 @@ impl<SP: Deref> Channel<SP> where
                                update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
                                counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: Vec::new(),
+                               channel_id: Some(self.context.channel_id()),
                        };
 
                        let mut htlc_updates = Vec::new();
@@ -3769,6 +3797,7 @@ impl<SP: Deref> Channel<SP> where
                                idx: self.context.cur_counterparty_commitment_transaction_number + 1,
                                secret: msg.per_commitment_secret,
                        }],
+                       channel_id: Some(self.context.channel_id()),
                };
 
                // Update state now that we've passed all the can-fail calls...
@@ -4180,8 +4209,8 @@ impl<SP: Deref> Channel<SP> where
                // first received the funding_signed.
                let mut funding_broadcastable =
                        if self.context.is_outbound() &&
-                               matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
-                               matches!(self.context.channel_state, ChannelState::ChannelReady(_))
+                               (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+                               matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
                        {
                                self.context.funding_transaction.take()
                        } else { None };
@@ -4826,6 +4855,7 @@ impl<SP: Deref> Channel<SP> where
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
+                               channel_id: Some(self.context.channel_id()),
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
                        self.push_ret_blockable_mon_update(monitor_update)
@@ -4932,11 +4962,17 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
+               let closure_reason = if self.initiated_shutdown() {
+                       ClosureReason::LocallyInitiatedCooperativeClosure
+               } else {
+                       ClosureReason::CounterpartyInitiatedCooperativeClosure
+               };
+
                assert!(self.context.shutdown_scriptpubkey.is_some());
                if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
                                let shutdown_result = ShutdownResult {
-                                       closure_reason: ClosureReason::CooperativeClosure,
+                                       closure_reason,
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
@@ -4971,7 +5007,7 @@ impl<SP: Deref> Channel<SP> where
                                                        .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
-                                                               closure_reason: ClosureReason::CooperativeClosure,
+                                                               closure_reason,
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
@@ -5191,7 +5227,7 @@ impl<SP: Deref> Channel<SP> where
                if !self.is_awaiting_monitor_update() { return false; }
                if matches!(
                        self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
-                       if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
+                       if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
                ) {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
                        // AwaitingChannelReady set, though our peer could have sent their channel_ready.
@@ -5236,6 +5272,11 @@ impl<SP: Deref> Channel<SP> where
                self.context.channel_state.is_local_shutdown_sent()
        }
 
+	/// Returns true if we initiated the shutdown of this channel.
+       pub fn initiated_shutdown(&self) -> bool {
+               self.context.local_initiated_shutdown.is_some()
+       }
+
        /// Returns true if this channel is fully shut down. True here implies that no further actions
        /// may/will be taken on this channel, and thus this object should be freed. Any future changes
        /// will be handled appropriately by the chain monitor.
@@ -5277,14 +5318,14 @@ impl<SP: Deref> Channel<SP> where
 
                // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
                // channel_ready until the entire batch is ready.
-               let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
+               let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
                        self.context.channel_state.set_our_channel_ready();
                        true
-               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
                        self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
                        self.context.update_time_counter += 1;
                        true
-               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
@@ -5859,7 +5900,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
 
-               let need_holding_cell = self.context.channel_state.should_force_holding_cell();
+               let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
                log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
                        payment_hash, amount_msat,
                        if force_holding_cell { "into holding cell" }
@@ -5965,7 +6006,8 @@ impl<SP: Deref> Channel<SP> where
                                feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
                                to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
                                to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
-                       }]
+                       }],
+                       channel_id: Some(self.context.channel_id()),
                };
                self.context.channel_state.set_awaiting_remote_revoke();
                monitor_update
@@ -6149,6 +6191,7 @@ impl<SP: Deref> Channel<SP> where
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
                self.context.channel_state.set_local_shutdown_sent();
+               self.context.local_initiated_shutdown = Some(());
                self.context.update_time_counter += 1;
 
                let monitor_update = if update_shutdown_script {
@@ -6159,6 +6202,7 @@ impl<SP: Deref> Channel<SP> where
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
+                               channel_id: Some(self.context.channel_id()),
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
                        self.push_ret_blockable_mon_update(monitor_update)
@@ -6408,6 +6452,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                channel_keys_id,
 
                                blocked_monitor_updates: Vec::new(),
+                               local_initiated_shutdown: None,
                        },
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                })
@@ -6475,7 +6520,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                // Now that we're past error-generating stuff, update our local state:
 
                self.context.channel_state = ChannelState::FundingNegotiated;
-               self.context.channel_id = funding_txo.to_channel_id();
+               self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
 
                // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
                // We can skip this if it is a zero-conf channel.
@@ -6814,7 +6859,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                                          &self.context.channel_transaction_parameters,
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
-                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
+                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_bitcoin_tx.txid, Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number,
@@ -7208,6 +7253,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                channel_type,
                                channel_keys_id,
 
+                               local_initiated_shutdown: None,
+
                                blocked_monitor_updates: Vec::new(),
                        },
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
@@ -7356,7 +7403,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                // Now that we're past error-generating stuff, update our local state:
 
                self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
-               self.context.channel_id = funding_txo.to_channel_id();
+               self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
                self.context.cur_counterparty_commitment_transaction_number -= 1;
                self.context.cur_holder_commitment_transaction_number -= 1;
 
@@ -7374,7 +7421,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                                          &self.context.channel_transaction_parameters,
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
-                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
+                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number + 1,
@@ -7477,6 +7524,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        let mut channel_state = self.context.channel_state;
                        if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
                                channel_state.set_peer_disconnected();
+                       } else {
+                               debug_assert!(false, "Pre-funded/shutdown channels should not be written");
                        }
                        channel_state.to_u32().write(writer)?;
                }
@@ -7780,6 +7829,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (39, pending_outbound_blinding_points, optional_vec),
                        (41, holding_cell_blinding_points, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
+                       (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
                });
 
                Ok(())
@@ -8067,6 +8117,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let mut is_batch_funding: Option<()> = None;
 
+               let mut local_initiated_shutdown: Option<()> = None;
+
                let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
                let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
 
@@ -8101,6 +8153,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (39, pending_outbound_blinding_points_opt, optional_vec),
                        (41, holding_cell_blinding_points_opt, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
+                       (45, local_initiated_shutdown, option),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -8331,6 +8384,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                channel_type: channel_type.unwrap(),
                                channel_keys_id,
 
+                               local_initiated_shutdown,
+
                                blocked_monitor_updates: blocked_monitor_updates.unwrap(),
                        }
                })
@@ -8378,6 +8433,18 @@ mod tests {
        use bitcoin::address::{WitnessProgram, WitnessVersion};
        use crate::prelude::*;
 
+       #[test]
+       fn test_channel_state_order() {
+               use crate::ln::channel::NegotiatingFundingFlags;
+               use crate::ln::channel::AwaitingChannelReadyFlags;
+               use crate::ln::channel::ChannelReadyFlags;
+
+               assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
+               assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
+               assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
+               assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
+       }
+
        struct TestFeeEstimator {
                fee_est: u32
        }
@@ -8885,17 +8952,34 @@ mod tests {
        fn blinding_point_skimmed_fee_malformed_ser() {
                // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
                // properly.
+               let logger = test_utils::TestLogger::new();
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
+               let best_block = BestBlock::from_network(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
 
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let features = channelmanager::provided_init_features(&config);
-               let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
-               let mut chan = Channel { context: outbound_chan.context };
+               let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
+               ).unwrap();
+               let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
+                       &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
+               ).unwrap();
+               outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
+               let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
+               }]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+               let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
+               let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
+                       Ok((chan, _, _)) => chan,
+                       Err((_, e)) => panic!("{}", e),
+               };
 
                let dummy_htlc_source = HTLCSource::OutboundRoute {
                        path: Path {
index 8df6d75ef5e80b4d4acb2cb39b2358be9c638131..19003961ff1a52bced58e52622dd59add517bfed 100644 (file)
@@ -9,11 +9,13 @@
 
 //! ChannelId definition.
 
+use crate::chain::transaction::OutPoint;
+use crate::io;
 use crate::ln::msgs::DecodeError;
 use crate::sign::EntropySource;
 use crate::util::ser::{Readable, Writeable, Writer};
 
-use crate::io;
+use bitcoin::hashes::Hash as _;
 use core::fmt;
 use core::ops::Deref;
 
@@ -40,6 +42,11 @@ impl ChannelId {
                Self(res)
        }
 
+	/// Create a _v1_ channel ID from a funding transaction outpoint.
+       pub fn v1_from_funding_outpoint(outpoint: OutPoint) -> Self {
+               Self::v1_from_funding_txid(outpoint.txid.as_byte_array(), outpoint.index)
+       }
+
        /// Create a _temporary_ channel ID randomly, based on an entropy source.
        pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self
        where ES::Target: EntropySource {
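
For reference, BOLT 2 defines the v1 `channel_id` as the funding txid XORed with the big-endian funding output index, which lands in the last two bytes of the txid bytes passed in via `as_byte_array` above. A rough standalone sketch of the derivation that `v1_from_funding_txid` performs (the byte positions are an assumption here, illustrative rather than normative):

	fn v1_channel_id(txid_bytes: [u8; 32], output_index: u16) -> [u8; 32] {
		let mut res = txid_bytes;
		// XOR the big-endian output index into the final two bytes.
		res[30] ^= (output_index >> 8) as u8;
		res[31] ^= (output_index & 0xff) as u8;
		res
	}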
index 5724b13fbaf0dacb833522c677441cd85e1a21d1..dcc63f22cdce78b53ba11412c571100712bcfa3a 100644 (file)
@@ -288,6 +288,7 @@ pub(super) struct PendingAddHTLCInfo {
        // Note that this may be an outbound SCID alias for the associated channel.
        prev_short_channel_id: u64,
        prev_htlc_id: u64,
+       prev_channel_id: ChannelId,
        prev_funding_outpoint: OutPoint,
        prev_user_channel_id: u128,
 }
@@ -328,6 +329,7 @@ pub(crate) struct HTLCPreviousHopData {
        incoming_packet_shared_secret: [u8; 32],
        phantom_shared_secret: Option<[u8; 32]>,
        blinded_failure: Option<BlindedFailure>,
+       channel_id: ChannelId,
 
        // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
        // channel with a preimage provided by the forward channel.
@@ -368,7 +370,7 @@ struct ClaimableHTLC {
 impl From<&ClaimableHTLC> for events::ClaimedHTLC {
        fn from(val: &ClaimableHTLC) -> Self {
                events::ClaimedHTLC {
-                       channel_id: val.prev_hop.outpoint.to_channel_id(),
+                       channel_id: val.prev_hop.channel_id,
                        user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
                        cltv_expiry: val.cltv_expiry,
                        value_msat: val.value,
@@ -707,7 +709,7 @@ enum BackgroundEvent {
        ///
        /// Note that any such events are lost on shutdown, so in general they must be updates which
        /// are regenerated on startup.
-       ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+       ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
        /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
        /// channel to continue normal operation.
        ///
@@ -721,6 +723,7 @@ enum BackgroundEvent {
        MonitorUpdateRegeneratedOnStartup {
                counterparty_node_id: PublicKey,
                funding_txo: OutPoint,
+               channel_id: ChannelId,
                update: ChannelMonitorUpdate
        },
        /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
@@ -749,7 +752,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
        /// outbound edge.
        EmitEventAndFreeOtherChannel {
                event: events::Event,
-               downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+               downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
        },
        /// Indicates we should immediately resume the operation of another channel, unless there is
        /// some other reason why the channel is blocked. In practice this simply means immediately
@@ -767,6 +770,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
                downstream_counterparty_node_id: PublicKey,
                downstream_funding_outpoint: OutPoint,
                blocking_action: RAAMonitorUpdateBlockingAction,
+               downstream_channel_id: ChannelId,
        },
 }
 
@@ -778,6 +782,9 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
                (0, downstream_counterparty_node_id, required),
                (2, downstream_funding_outpoint, required),
                (4, blocking_action, required),
+               // Note that by the time we get past the required read above, downstream_funding_outpoint will be
+               // filled in, so we can safely unwrap it here.
+               (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
        },
        (2, EmitEventAndFreeOtherChannel) => {
                (0, event, upgradable_required),
@@ -795,12 +802,16 @@ pub(crate) enum EventCompletionAction {
        ReleaseRAAChannelMonitorUpdate {
                counterparty_node_id: PublicKey,
                channel_funding_outpoint: OutPoint,
+               channel_id: ChannelId,
        },
 }
 impl_writeable_tlv_based_enum!(EventCompletionAction,
        (0, ReleaseRAAChannelMonitorUpdate) => {
                (0, channel_funding_outpoint, required),
                (2, counterparty_node_id, required),
+               // Note that by the time we get past the required read above, channel_funding_outpoint will be
+               // filled in, so we can safely unwrap it here.
+               (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
        };
 );
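
The `(3, channel_id, (default_value, ...))` entry above keeps deserialization backward compatible: when reading an `EventCompletionAction` written by an older version, TLV type 3 is simply absent and the channel ID is recomputed from the funding outpoint. A hedged sketch of the fallback the TLV macro encodes (`maybe_read_channel_id` is a hypothetical name, not LDK's):

	let channel_id = match maybe_read_channel_id {
		// Newer serializations carry the channel ID explicitly.
		Some(id) => id,
		// Older ones fall back to the v1 derivation from the outpoint.
		None => ChannelId::v1_from_funding_outpoint(channel_funding_outpoint),
	};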
 
@@ -822,7 +833,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
 impl RAAMonitorUpdateBlockingAction {
        fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
                Self::ForwardedPaymentInboundClaim {
-                       channel_id: prev_hop.outpoint.to_channel_id(),
+                       channel_id: prev_hop.channel_id,
                        htlc_id: prev_hop.htlc_id,
                }
        }
@@ -892,7 +903,9 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                if require_disconnected && self.is_connected {
                        return false
                }
-               self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+               !self.channel_by_id.iter().any(|(_, phase)|
+                       matches!(phase, ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_))
+               )
                        && self.monitor_update_blocked_actions.is_empty()
                        && self.in_flight_monitor_updates.is_empty()
        }
@@ -1627,9 +1640,6 @@ pub struct ChannelDetails {
        pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
        /// our counterparty already.
-       ///
-       /// Note that, if this has been set, `channel_id` will be equivalent to
-       /// `funding_txo.unwrap().to_channel_id()`.
        pub funding_txo: Option<OutPoint>,
        /// The features which this channel operates with. See individual features for more info.
        ///
@@ -2146,6 +2156,7 @@ macro_rules! emit_channel_pending_event {
                                counterparty_node_id: $channel.context.get_counterparty_node_id(),
                                user_channel_id: $channel.context.get_user_id(),
                                funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+                               channel_type: Some($channel.context.get_channel_type().clone()),
                        }, None));
                        $channel.context.set_channel_pending_event_emitted();
                }
@@ -2855,7 +2866,7 @@ where
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
                        self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
-               if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
+               if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
                        // There isn't anything we can do if we get an update failure - we're already
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -3954,7 +3965,10 @@ where
                                        }
                                        let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
                                        if let Some(funding_batch_state) = funding_batch_state.as_mut() {
-                                               funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+						// TODO(dual_funding): We only do batch funding for V1 channels at the moment. We'll
+						// probably need to stop relying on the outpoint for the channel ID here if we want
+						// to support batching V2 channels as well.
+                                               funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
                                        }
                                        Ok(outpoint)
                                })
@@ -4179,6 +4193,7 @@ where
                let mut per_source_pending_forward = [(
                        payment.prev_short_channel_id,
                        payment.prev_funding_outpoint,
+                       payment.prev_channel_id,
                        payment.prev_user_channel_id,
                        vec![(pending_htlc_info, payment.prev_htlc_id)]
                )];
@@ -4206,6 +4221,7 @@ where
                                short_channel_id: payment.prev_short_channel_id,
                                user_channel_id: Some(payment.prev_user_channel_id),
                                outpoint: payment.prev_funding_outpoint,
+                               channel_id: payment.prev_channel_id,
                                htlc_id: payment.prev_htlc_id,
                                incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
                                phantom_shared_secret: None,
@@ -4229,7 +4245,7 @@ where
 
                let mut new_events = VecDeque::new();
                let mut failed_forwards = Vec::new();
-               let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+               let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
                {
                        let mut forward_htlcs = HashMap::new();
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
@@ -4242,20 +4258,21 @@ where
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                               prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                                               forward_info: PendingHTLCInfo {
+                                                                               prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+                                                                               prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                                        routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
                                                                                        outgoing_cltv_value, ..
                                                                                }
                                                                        }) => {
                                                                                macro_rules! failure_handler {
                                                                                        ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
-                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
                                                                                                log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
 
                                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                        short_channel_id: prev_short_channel_id,
                                                                                                        user_channel_id: Some(prev_user_channel_id),
+                                                                                                       channel_id: prev_channel_id,
                                                                                                        outpoint: prev_funding_outpoint,
                                                                                                        htlc_id: prev_htlc_id,
                                                                                                        incoming_packet_shared_secret: incoming_shared_secret,
@@ -4319,7 +4336,7 @@ where
                                                                                                                        outgoing_cltv_value, Some(phantom_shared_secret), false, None,
                                                                                                                        current_height, self.default_configuration.accept_mpp_keysend)
                                                                                                                {
-                                                                                                                       Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+                                                                                                                       Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
                                                                                                                        Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                }
                                                                                                        },
@@ -4364,8 +4381,8 @@ where
                                                for forward_info in pending_forwards.drain(..) {
                                                        let queue_fail_htlc_res = match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                       prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                                       forward_info: PendingHTLCInfo {
+                                                                       prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+                                                                       prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                                incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
                                                                                routing: PendingHTLCRouting::Forward {
                                                                                        onion_packet, blinded, ..
@@ -4376,6 +4393,7 @@ where
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
+                                                                               channel_id: prev_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
@@ -4447,8 +4465,8 @@ where
                                        'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
                                                match forward_info {
                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                               prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                               forward_info: PendingHTLCInfo {
+                                                               prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+                                                               prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                        routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
                                                                        skimmed_fee_msat, ..
                                                                }
@@ -4482,6 +4500,7 @@ where
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
+                                                                               channel_id: prev_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
@@ -4513,6 +4532,7 @@ where
                                                                                failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                short_channel_id: $htlc.prev_hop.short_channel_id,
                                                                                                user_channel_id: $htlc.prev_hop.user_channel_id,
+                                                                                               channel_id: prev_channel_id,
                                                                                                outpoint: prev_funding_outpoint,
                                                                                                htlc_id: $htlc.prev_hop.htlc_id,
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
@@ -4593,7 +4613,6 @@ where
                                                                                        #[allow(unused_assignments)] {
                                                                                                committed_to_claimable = true;
                                                                                        }
-                                                                                       let prev_channel_id = prev_funding_outpoint.to_channel_id();
                                                                                        htlcs.push(claimable_htlc);
                                                                                        let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
                                                                                        htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
@@ -4737,19 +4756,19 @@ where
 
                for event in background_events.drain(..) {
                        match event {
-                               BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                               BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
                                        // The channel has already been closed, so there is no use bothering to wait
                                        // for the monitor update to complete.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                },
-                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
                                        let mut updated_chan = false;
                                        {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
                                                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
-                                                       match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+                                                       match peer_state.channel_by_id.entry(channel_id) {
                                                                hash_map::Entry::Occupied(mut chan_phase) => {
                                                                        if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
                                                                                updated_chan = true;
@@ -5288,10 +5307,10 @@ where
                        },
                        HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
-                               ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+                               ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
                        }) => {
                                log_trace!(
-                                       WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+                                       WithContext::from(&self.logger, None, Some(*channel_id)),
                                        "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
                                        if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
                                );
@@ -5335,7 +5354,7 @@ where
                                if push_forward_ev { self.push_pending_forwards_ev(); }
                                let mut pending_events = self.pending_events.lock().unwrap();
                                pending_events.push_back((events::Event::HTLCHandlingFailed {
-                                       prev_channel_id: outpoint.to_channel_id(),
+                                       prev_channel_id: *channel_id,
                                        failed_next_destination: destination,
                                }, None));
                        },
@@ -5476,7 +5495,7 @@ where
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
-                               let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
+                               let prev_hop_chan_id = htlc.prev_hop.channel_id;
                                if let Err((pk, err)) = self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
@@ -5529,7 +5548,7 @@ where
 
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       let chan_id = prev_hop.outpoint.to_channel_id();
+                       let chan_id = prev_hop.channel_id;
                        let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
                                Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
                                None => None
@@ -5567,6 +5586,7 @@ where
                                                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                                                        counterparty_node_id,
                                                                                        funding_txo: prev_hop.outpoint,
+                                                                                       channel_id: prev_hop.channel_id,
                                                                                        update: monitor_update.clone(),
                                                                                });
                                                                }
@@ -5581,13 +5601,13 @@ where
 
                                                                log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
                                                                        chan_id, action);
-                                                               let (node_id, funding_outpoint, blocker) =
+                                                               let (node_id, _funding_outpoint, channel_id, blocker) =
                                                                if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                                                        downstream_counterparty_node_id: node_id,
                                                                        downstream_funding_outpoint: funding_outpoint,
-                                                                       blocking_action: blocker,
+                                                                       blocking_action: blocker, downstream_channel_id: channel_id,
                                                                } = action {
-                                                                       (node_id, funding_outpoint, blocker)
+                                                                       (node_id, funding_outpoint, channel_id, blocker)
                                                                } else {
                                                                        debug_assert!(false,
                                                                                "Duplicate claims should always free another channel immediately");
@@ -5597,7 +5617,7 @@ where
                                                                        let mut peer_state = peer_state_mtx.lock().unwrap();
                                                                        if let Some(blockers) = peer_state
                                                                                .actions_blocking_raa_monitor_updates
-                                                                               .get_mut(&funding_outpoint.to_channel_id())
+                                                                               .get_mut(&channel_id)
                                                                        {
                                                                                let mut found_blocker = false;
                                                                                blockers.retain(|iter| {
@@ -5626,6 +5646,7 @@ where
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage,
                        }],
+                       channel_id: Some(prev_hop.channel_id),
                };
 
                if !during_init {
@@ -5637,7 +5658,8 @@ where
                                // with a preimage we *must* somehow manage to propagate it to the upstream
                                // channel, or we must have an ability to receive the same event and try
                                // again on restart.
-                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+                                       "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
                                        payment_preimage, update_res);
                        }
                } else {
@@ -5653,7 +5675,7 @@ where
                        // complete the monitor update completion action from `completion_action`.
                        self.pending_background_events.lock().unwrap().push(
                                BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
-                                       prev_hop.outpoint, preimage_update,
+                                       prev_hop.outpoint, prev_hop.channel_id, preimage_update,
                                )));
                }
                // Note that we do process the completion action here. This totally could be a
@@ -5670,8 +5692,9 @@ where
        }
 
        fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
-               forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
-               next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+               forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+               startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+               next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
        ) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
@@ -5681,7 +5704,7 @@ where
                                        debug_assert_eq!(pubkey, path.hops[0].pubkey);
                                }
                                let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                                       channel_funding_outpoint: next_channel_outpoint,
+                                       channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
                                        counterparty_node_id: path.hops[0].pubkey,
                                };
                                self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
@@ -5689,15 +5712,17 @@ where
                                        &self.logger);
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
-                               let prev_outpoint = hop_data.outpoint;
+                               let prev_channel_id = hop_data.channel_id;
                                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
                                #[cfg(debug_assertions)]
                                let claiming_chan_funding_outpoint = hop_data.outpoint;
+                               #[cfg(debug_assertions)]
+                               let claiming_channel_id = hop_data.channel_id;
                                let res = self.claim_funds_from_hop(hop_data, payment_preimage,
                                        |htlc_claim_value_msat, definitely_duplicate| {
                                                let chan_to_release =
                                                        if let Some(node_id) = next_channel_counterparty_node_id {
-                                                               Some((node_id, next_channel_outpoint, completed_blocker))
+                                                               Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
                                                        } else {
                                                                // We can only get `None` here if we are processing a
                                                                // `ChannelMonitor`-originated event, in which case we
@@ -5734,7 +5759,7 @@ where
                                                                                },
                                                                                // or the channel we'd unblock is already closed,
                                                                                BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
-                                                                                       (funding_txo, monitor_update)
+                                                                                       (funding_txo, _channel_id, monitor_update)
                                                                                ) => {
                                                                                        if *funding_txo == next_channel_outpoint {
                                                                                                assert_eq!(monitor_update.updates.len(), 1);
@@ -5750,7 +5775,7 @@ where
                                                                                BackgroundEvent::MonitorUpdatesComplete {
                                                                                        channel_id, ..
                                                                                } =>
-                                                                                       *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+                                                                                       *channel_id == claiming_channel_id,
                                                                        }
                                                                }), "{:?}", *background_events);
                                                        }
@@ -5760,22 +5785,26 @@ where
                                                                Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                                                        downstream_counterparty_node_id: other_chan.0,
                                                                        downstream_funding_outpoint: other_chan.1,
-                                                                       blocking_action: other_chan.2,
+                                                                       downstream_channel_id: other_chan.2,
+                                                                       blocking_action: other_chan.3,
                                                                })
                                                        } else { None }
                                                } else {
-                                                       let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                                       let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
                                                                if let Some(claimed_htlc_value) = htlc_claim_value_msat {
                                                                        Some(claimed_htlc_value - forwarded_htlc_value)
                                                                } else { None }
                                                        } else { None };
+                                                       debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
+                                                               "skimmed_fee_msat must always be included in total_fee_earned_msat");
                                                        Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                                event: events::Event::PaymentForwarded {
-                                                                       fee_earned_msat,
+                                                                       total_fee_earned_msat,
                                                                        claim_from_onchain_tx: from_onchain,
-                                                                       prev_channel_id: Some(prev_outpoint.to_channel_id()),
-                                                                       next_channel_id: Some(next_channel_outpoint.to_channel_id()),
+                                                                       prev_channel_id: Some(prev_channel_id),
+                                                                       next_channel_id: Some(next_channel_id),
                                                                        outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+                                                                       skimmed_fee_msat,
                                                                },
                                                                downstream_counterparty_and_funding_outpoint: chan_to_release,
                                                        })
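For context on the `total_fee_earned_msat` / `skimmed_fee_msat` bookkeeping above: both are `Option<u64>`, and Rust orders `Option`s with `None` before any `Some`, so the `debug_assert!` only fires when a recorded skimmed fee exceeds the computed total (or exists without one). A minimal sketch of the arithmetic, with invented names and assuming the claimed amount is at least the forwarded amount:

    // Hypothetical illustration only; the real code keeps these as Option<u64>.
    fn forwarding_fees_msat(claimed: u64, forwarded: u64, skimmed: u64) -> (u64, u64) {
        let total_fee_earned = claimed - forwarded;  // what the node kept overall
        debug_assert!(skimmed <= total_fee_earned);  // skimmed fees are a subset
        (total_fee_earned, total_fee_earned - skimmed) // (total, plain routing fee)
    }

For example, claiming 1_000 msat inbound while forwarding 960 msat outbound earns 40 msat in total; if 25 msat of that was skimmed from the payment amount, the remaining 15 msat is the ordinary routing fee.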
@@ -5824,16 +5853,17 @@ where
                                        event, downstream_counterparty_and_funding_outpoint
                                } => {
                                        self.pending_events.lock().unwrap().push_back((event, None));
-                                       if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
-                                               self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+                                       if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
+                                               self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
                                        }
                                },
                                MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
-                                       downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+                                       downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
                                } => {
                                        self.handle_monitor_update_release(
                                                downstream_counterparty_node_id,
                                                downstream_funding_outpoint,
+                                               downstream_channel_id,
                                                Some(blocking_action),
                                        );
                                },
@@ -5848,7 +5878,7 @@ where
                commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
                pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-       -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+       -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
                let logger = WithChannelContext::from(&self.logger, &channel.context);
                log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
                        &channel.context.channel_id(),
@@ -5863,7 +5893,7 @@ where
                let counterparty_node_id = channel.context.get_counterparty_node_id();
                if !pending_forwards.is_empty() {
                        htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
-                               channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
+                               channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
                }
 
                if let Some(msg) = channel_ready {
@@ -5917,7 +5947,7 @@ where
                htlc_forwards
        }
 
-       fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+       fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
                debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
 
                let counterparty_node_id = match counterparty_node_id {
@@ -5939,11 +5969,11 @@ where
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
-                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
-                                       .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+                                       .remove(&channel_id).unwrap_or(Vec::new());
                                mem::drop(peer_state_lock);
                                mem::drop(per_peer_state);
                                self.handle_monitor_update_completion_actions(update_actions);
@@ -6714,7 +6744,7 @@ where
 
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let funding_txo;
-               let (htlc_source, forwarded_htlc_value) = {
+               let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
@@ -6752,7 +6782,11 @@ where
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
+               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
+                       Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
+                       funding_txo, msg.channel_id
+               );
+
                Ok(())
        }
 
@@ -6840,8 +6874,8 @@ where
        }
 
        #[inline]
-       fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
-               for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+       fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+               for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
                        let mut push_forward_event = false;
                        let mut new_intercept_events = VecDeque::new();
                        let mut failed_intercept_forwards = Vec::new();
@@ -6860,7 +6894,7 @@ where
                                        match forward_htlcs.entry(scid) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                               prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
+                                                               prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
@@ -6878,15 +6912,16 @@ where
                                                                                        intercept_id
                                                                                }, None));
                                                                                entry.insert(PendingAddHTLCInfo {
-                                                                                       prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
+                                                                                       prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
                                                                        },
                                                                        hash_map::Entry::Occupied(_) => {
-                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
                                                                                log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                        short_channel_id: prev_short_channel_id,
                                                                                        user_channel_id: Some(prev_user_channel_id),
                                                                                        outpoint: prev_funding_outpoint,
+                                                                                       channel_id: prev_channel_id,
                                                                                        htlc_id: prev_htlc_id,
                                                                                        incoming_packet_shared_secret: forward_info.incoming_shared_secret,
                                                                                        phantom_shared_secret: None,
@@ -6906,7 +6941,7 @@ where
                                                                        push_forward_event = true;
                                                                }
                                                                entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                       prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
+                                                                       prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
                                                        }
                                                }
                                        }
@@ -6950,13 +6985,14 @@ where
        /// the [`ChannelMonitorUpdate`] in question.
        fn raa_monitor_updates_held(&self,
                actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
-               channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+               channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
        ) -> bool {
                actions_blocking_raa_monitor_updates
-                       .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+                       .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
                || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
                        action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                channel_funding_outpoint,
+                               channel_id,
                                counterparty_node_id,
                        })
                })
@@ -6973,7 +7009,7 @@ where
 
                        if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
                                return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
-                                       chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+                                       chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
                        }
                }
                false
@@ -6995,7 +7031,7 @@ where
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
                                                        self.raa_monitor_updates_held(
-                                                               &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+                                                               &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
                                                                *counterparty_node_id)
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
@@ -7241,22 +7277,24 @@ where
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+               for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
-                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
+                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
-                                                       self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
+                                                       self.claim_funds_internal(htlc_update.source, preimage,
+                                                               htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
+                                                               false, counterparty_node_id, funding_outpoint, channel_id);
                                                } else {
                                                        log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
-                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
                                                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
-                                       MonitorEvent::HolderForceClosed(funding_outpoint) => {
+                                       MonitorEvent::HolderForceClosed(_funding_outpoint) => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
@@ -7272,7 +7310,7 @@ where
                                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                                let peer_state = &mut *peer_state_lock;
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                                                               if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+                                                               if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
                                                                                failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
@@ -7291,8 +7329,8 @@ where
                                                        }
                                                }
                                        },
-                                       MonitorEvent::Completed { funding_txo, monitor_update_id } => {
-                                               self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
+                                       MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
+                                               self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
                                        },
                                }
                        }
@@ -7509,14 +7547,14 @@ where
                        // Channel::force_shutdown tries to make us do) as we may still be in initialization,
                        // so we track the update internally and handle it when the user next calls
                        // timer_tick_occurred, guaranteeing we're running normally.
-                       if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
+                       if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
                                assert_eq!(update.updates.len(), 1);
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
                                } else { unreachable!(); }
                                self.pending_background_events.lock().unwrap().push(
                                        BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                               counterparty_node_id, funding_txo, update
+                                               counterparty_node_id, funding_txo, update, channel_id,
                                        });
                        }
                        self.finish_close_channel(failure);
@@ -8073,9 +8111,12 @@ where
        /// [`Event`] being handled) completes, this should be called to restore the channel to normal
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
-       fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+       fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
+               channel_funding_outpoint: OutPoint, channel_id: ChannelId,
+               mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+
                let logger = WithContext::from(
-                       &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+                       &self.logger, Some(counterparty_node_id), Some(channel_id),
                );
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8085,28 +8126,29 @@ where
                                if let Some(blocker) = completed_blocker.take() {
                                        // Only do this on the first iteration of the loop.
                                        if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
-                                               .get_mut(&channel_funding_outpoint.to_channel_id())
+                                               .get_mut(&channel_id)
                                        {
                                                blockers.retain(|iter| iter != &blocker);
                                        }
                                }
 
                                if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
-                                       channel_funding_outpoint, counterparty_node_id) {
+                                       channel_funding_outpoint, channel_id, counterparty_node_id) {
                                        // Check that, while holding the peer lock, we don't have anything else
                                        // blocking monitor updates for this channel. If we do, release the monitor
                                        // update(s) when those blockers complete.
                                        log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
-                                               &channel_funding_outpoint.to_channel_id());
+                                               &channel_id);
                                        break;
                                }
 
-                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
+                                       channel_id) {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
                                                        log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
-                                                               channel_funding_outpoint.to_channel_id());
+                                                               channel_id);
                                                        handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                                peer_state_lck, peer_state, per_peer_state, chan);
                                                        if further_update_exists {
@@ -8116,7 +8158,7 @@ where
                                                        }
                                                } else {
                                                        log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
-                                                               channel_funding_outpoint.to_channel_id());
+                                                               channel_id);
                                                }
                                        }
                                }
@@ -8133,9 +8175,9 @@ where
                for action in actions {
                        match action {
                                EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                                       channel_funding_outpoint, counterparty_node_id
+                                       channel_funding_outpoint, channel_id, counterparty_node_id
                                } => {
-                                       self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+                                       self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
                                }
                        }
                }
@@ -8531,6 +8573,7 @@ where
                                                incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
                                                phantom_shared_secret: None,
                                                outpoint: htlc.prev_funding_outpoint,
+                                               channel_id: htlc.prev_channel_id,
                                                blinded_failure: htlc.forward_info.routing.blinded_failure(),
                                        });
 
@@ -8542,7 +8585,7 @@ where
                                                        HTLCFailReason::from_failure_code(0x2000 | 2),
                                                        HTLCDestination::InvalidForward { requested_forward_scid }));
                                        let logger = WithContext::from(
-                                               &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+                                               &self.logger, None, Some(htlc.prev_channel_id)
                                        );
                                        log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
                                        false
@@ -8870,10 +8913,12 @@ where
                                                        }
                                                        &mut chan.context
                                                },
-                                               // Unfunded channels will always be removed.
-                                               ChannelPhase::UnfundedOutboundV1(chan) => {
-                                                       &mut chan.context
+                                               // We retain UnfundedOutboundV1 channels for some time in case the
+                                               // peer unexpectedly disconnects and intends to reconnect.
+                                               ChannelPhase::UnfundedOutboundV1(_) => {
+                                                       return true;
                                                },
+                                               // Unfunded inbound channels will always be removed.
                                                ChannelPhase::UnfundedInboundV1(chan) => {
                                                        &mut chan.context
                                                },
@@ -9012,15 +9057,31 @@ where
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
 
-                               peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-                               ).for_each(|chan| {
-                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
-                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                               node_id: chan.context.get_counterparty_node_id(),
-                                               msg: chan.get_channel_reestablish(&&logger),
-                                       });
-                               });
+                               for (_, phase) in peer_state.channel_by_id.iter_mut() {
+                                       match phase {
+                                               ChannelPhase::Funded(chan) => {
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                                               node_id: chan.context.get_counterparty_node_id(),
+                                                               msg: chan.get_channel_reestablish(&&logger),
+                                                       });
+                                               }
+
+                                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                       pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+                                                               node_id: chan.context.get_counterparty_node_id(),
+                                                               msg: chan.get_open_channel(self.chain_hash),
+                                                       });
+                                               }
+
+                                               ChannelPhase::UnfundedInboundV1(_) => {
+                                                       // Unfunded inbound channels are cleared when a peer disconnects and
+                                                       // are never persisted, so they cannot be recovered after a crash.
+                                                       // Therefore, they shouldn't exist at this point.
+                                                       debug_assert!(false);
+                                               }
+                                       }
+                               }
                        }
 
                        return NotifyOption::SkipPersistHandleEvents;
@@ -9627,6 +9688,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
        (4, htlc_id, required),
        (6, incoming_packet_shared_secret, required),
        (7, user_channel_id, option),
+       // Note that by the time we get past the required read for type 2 above, outpoint will be
+       // filled in, so we can safely unwrap it here.
+       (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
 });
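For reference, the `default_value` fallback above is the BOLT 2 v1 channel_id derivation: the 16-bit funding output index XORed into the last two bytes of the funding txid. A standalone sketch (not the actual `ChannelId::v1_from_funding_outpoint` implementation; byte order assumed to match rust-lightning's txid serialization):

    // Big-endian XOR of the output index into the final two txid bytes.
    fn v1_channel_id(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        id[30] ^= (output_index >> 8) as u8;
        id[31] ^= (output_index & 0xff) as u8;
        id
    }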
 
 impl Writeable for ClaimableHTLC {
@@ -9778,6 +9842,9 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
        (2, prev_short_channel_id, required),
        (4, prev_htlc_id, required),
        (6, prev_funding_outpoint, required),
+       // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
+       // filled in, so we can safely unwrap it here.
+       (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
 });
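Same pattern as `HTLCPreviousHopData` above: odd TLV type 7 is absent in older serializations, and since required fields are parsed first, the default expression can safely unwrap the already-read outpoint. A hedged sketch of the fallback the macro effectively performs (`maybe_id` standing in for whatever was read for type 7):

    use lightning::chain::transaction::OutPoint;
    use lightning::ln::ChannelId;

    // Illustration only; impl_writeable_tlv_based! generates the real read logic.
    fn prev_channel_id_or_default(
        maybe_id: Option<ChannelId>, prev_funding_outpoint: OutPoint,
    ) -> ChannelId {
        maybe_id.unwrap_or_else(|| ChannelId::v1_from_funding_outpoint(prev_funding_outpoint))
    }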
 
 impl Writeable for HTLCForwardInfo {
@@ -10291,12 +10358,14 @@ where
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
                let mut close_background_events = Vec::new();
+               let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
                for _ in 0..channel_count {
                        let mut channel: Channel<SP> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
                        let logger = WithChannelContext::from(&args.logger, &channel.context);
                        let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                       funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
                                if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
@@ -10326,9 +10395,9 @@ where
                                        if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
                                                return Err(DecodeError::InvalidValue);
                                        }
-                                       if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
+                                       if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                                       counterparty_node_id, funding_txo, update
+                                                       counterparty_node_id, funding_txo, channel_id, update
                                                });
                                        }
                                        failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
@@ -10407,14 +10476,16 @@ where
                for (funding_txo, monitor) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
                                let logger = WithChannelMonitor::from(&args.logger, monitor);
+                               let channel_id = monitor.channel_id();
                                log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
-                                       &funding_txo.to_channel_id());
+                                       &channel_id);
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        counterparty_node_id: None,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+                                       channel_id: Some(monitor.channel_id()),
                                };
-                               close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+                               close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
                        }
                }
 
@@ -10591,12 +10662,13 @@ where
                                $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
                                for update in $chan_in_flight_upds.iter() {
                                        log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
-                                               update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
+                                               update.update_id, $channel_info_log, &$monitor.channel_id());
                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id: $counterparty_node_id,
                                                        funding_txo: $funding_txo,
+                                                       channel_id: $monitor.channel_id(),
                                                        update: update.clone(),
                                                });
                                }
@@ -10607,7 +10679,7 @@ where
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdatesComplete {
                                                        counterparty_node_id: $counterparty_node_id,
-                                                       channel_id: $funding_txo.to_channel_id(),
+                                                       channel_id: $monitor.channel_id(),
                                                });
                                }
                                if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
@@ -10661,7 +10733,8 @@ where
 
                if let Some(in_flight_upds) = in_flight_monitor_updates {
                        for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
-                               let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
+                               let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
+                               let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
                                if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
                                        // Now that we've removed all the in-flight monitor updates for channels that are
                                        // still open, we need to replay any monitor updates that are for closed channels,
@@ -10674,8 +10747,8 @@ where
                                                funding_txo, monitor, peer_state, logger, "closed ");
                                } else {
                                        log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
-                                       log_error!(logger, " The ChannelMonitor for channel {} is missing.",
-                                               &funding_txo.to_channel_id());
+                                       log_error!(logger, " The ChannelMonitor for channel {} is missing.",
+                                               if let Some(channel_id) = channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) });
                                        log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -10763,7 +10836,7 @@ where
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                                log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+                                                                                                       &htlc.payment_hash, &monitor.channel_id());
                                                                                                false
                                                                                        } else { true }
                                                                                } else { true }
@@ -10773,7 +10846,7 @@ where
                                                                pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+                                                                                       &htlc.payment_hash, &monitor.channel_id());
                                                                                pending_events_read.retain(|(event, _)| {
                                                                                        if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
                                                                                                intercepted_id != ev_id
@@ -10797,6 +10870,7 @@ where
                                                                        let compl_action =
                                                                                EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                                                                        channel_funding_outpoint: monitor.get_funding_txo().0,
+                                                                                       channel_id: monitor.channel_id(),
                                                                                        counterparty_node_id: path.hops[0].pubkey,
                                                                                };
                                                                        pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
@@ -10822,7 +10896,7 @@ where
                                                                        // channel_id -> peer map entry).
                                                                        counterparty_opt.is_none(),
                                                                        counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
-                                                                       monitor.get_funding_txo().0))
+                                                                       monitor.get_funding_txo().0, monitor.channel_id()))
                                                        } else { None }
                                                } else {
                                                        // If it was an outbound payment, we've handled it above - if a preimage
@@ -10995,7 +11069,7 @@ where
                                                // this channel as well. On the flip side, there's no harm in restarting
                                                // without the new monitor persisted - we'll end up right back here on
                                                // restart.
-                                               let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+                                               let previous_channel_id = claimable_htlc.prev_hop.channel_id;
                                                if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
                                                        let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -11028,14 +11102,14 @@ where
                                        for action in actions.iter() {
                                                if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                        downstream_counterparty_and_funding_outpoint:
-                                                               Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+                                                               Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
                                                } = action {
                                                        if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
                                                                log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
-                                                                       blocked_channel_outpoint.to_channel_id());
+                                                                       blocked_channel_id);
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
-                                                                       .entry(blocked_channel_outpoint.to_channel_id())
+                                                                       .entry(*blocked_channel_id)
                                                                        .or_insert_with(Vec::new).push(blocking_action.clone());
                                                        } else {
                                                                // If the channel we were blocking has closed, we don't need to
@@ -11115,12 +11189,12 @@ where
                        channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
 
-               for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+               for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
                        // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
                        // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
                        // channel is closed we just assume that it probably came from an on-chain claim.
-                       channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
-                               downstream_closed, true, downstream_node_id, downstream_funding);
+                       channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
+                               downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
                }
 
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -11807,8 +11881,8 @@ mod tests {
                }
                let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 
-               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
-               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+               check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
        }
 
        fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
index 2563868e779251525ebed3453963ab186de6bfcb..170d5b0b022b5b06a313c048e61cf828287181bb 100644 (file)
@@ -252,7 +252,7 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block)
 
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
        // Ensure `get_claimable_balances`' self-tests never panic
-       for funding_outpoint in node.chain_monitor.chain_monitor.list_monitors() {
+       for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() {
                node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances();
        }
 }
@@ -601,7 +601,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                        let mut deserialized_monitors = Vec::new();
                        {
-                               for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
+                               for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
                                        let mut w = test_utils::TestVecWriter(Vec::new());
                                        self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
                                        let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
@@ -644,7 +644,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
-                               if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
+                               let funding_outpoint = deserialized_monitor.get_funding_txo().0;
+                               if chain_monitor.watch_channel(funding_outpoint, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
                                        panic!();
                                }
                        }
@@ -1068,7 +1069,8 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
        assert!(node_read.is_empty());
 
        for monitor in monitors_read.drain(..) {
-               assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+               let funding_outpoint = monitor.get_funding_txo().0;
+               assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, monitor),
                        Ok(ChannelMonitorUpdateStatus::Completed));
                check_added_monitors!(node, 1);
        }
@@ -2201,14 +2203,19 @@ macro_rules! expect_payment_path_successful {
 
 pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
        event: Event, node: &H, prev_node: &H, next_node: &H, expected_fee: Option<u64>,
-       upstream_force_closed: bool, downstream_force_closed: bool
+       expected_extra_fees_msat: Option<u64>, upstream_force_closed: bool,
+       downstream_force_closed: bool
 ) {
        match event {
                Event::PaymentForwarded {
-                       fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
-                       outbound_amount_forwarded_msat: _
+                       total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
+                       outbound_amount_forwarded_msat: _, skimmed_fee_msat
                } => {
-                       assert_eq!(fee_earned_msat, expected_fee);
+                       assert_eq!(total_fee_earned_msat, expected_fee);
+
+                       // Check that the (knowingly) withheld amount matches the expected
+                       // skimmed fee for this hop.
+                       assert_eq!(skimmed_fee_msat, expected_extra_fees_msat);
                        if !upstream_force_closed {
                                // Is the event prev_channel_id in one of the channels between the two nodes?
                                assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == prev_node.node().get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
@@ -2224,13 +2231,15 @@ pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
        }
 }
 
+#[macro_export]
 macro_rules! expect_payment_forwarded {
        ($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
                let mut events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                $crate::ln::functional_test_utils::expect_payment_forwarded(
-                       events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee,
-                       $upstream_force_closed, $downstream_force_closed);
+                       events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee, None,
+                       $upstream_force_closed, $downstream_force_closed
+               );
        }
 }
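With #[macro_export] added, the macro is also callable from outside this module, e.g. from crates compiled with the _test_utils feature. A hypothetical invocation (fee value invented; node fixtures assumed in scope), matching the argument order above:

    expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1_000), false, false);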
 
@@ -2550,24 +2559,54 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(
        origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool,
        our_payment_preimage: PaymentPreimage
 ) -> u64 {
-       let extra_fees = vec![0; expected_paths.len()];
-       do_claim_payment_along_route_with_extra_penultimate_hop_fees(origin_node, expected_paths,
-               &extra_fees[..], skip_last, our_payment_preimage)
-}
-
-pub fn do_claim_payment_along_route_with_extra_penultimate_hop_fees<'a, 'b, 'c>(
-       origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees:
-       &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage
-) -> u64 {
-       assert_eq!(expected_paths.len(), expected_extra_fees.len());
        for path in expected_paths.iter() {
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage);
-       pass_claimed_payment_along_route(origin_node, expected_paths, expected_extra_fees, skip_last, our_payment_preimage)
+       pass_claimed_payment_along_route(
+               ClaimAlongRouteArgs::new(origin_node, expected_paths, our_payment_preimage)
+                       .skip_last(skip_last)
+       )
+}
+
+pub struct ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
+       pub origin_node: &'a Node<'b, 'c, 'd>,
+       pub expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]],
+       pub expected_extra_fees: Vec<u32>,
+       pub expected_min_htlc_overpay: Vec<u32>,
+       pub skip_last: bool,
+       pub payment_preimage: PaymentPreimage,
 }
 
-pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees: &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage) -> u64 {
+impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
+       pub fn new(
+               origin_node: &'a Node<'b, 'c, 'd>, expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]],
+               payment_preimage: PaymentPreimage,
+       ) -> Self {
+               Self {
+                       origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()],
+                       expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage,
+               }
+       }
+       pub fn skip_last(mut self, skip_last: bool) -> Self {
+               self.skip_last = skip_last;
+               self
+       }
+       pub fn with_expected_extra_fees(mut self, extra_fees: Vec<u32>) -> Self {
+               self.expected_extra_fees = extra_fees;
+               self
+       }
+       pub fn with_expected_min_htlc_overpay(mut self, min_htlc_overpay: Vec<u32>) -> Self {
+               self.expected_min_htlc_overpay = min_htlc_overpay;
+               self
+       }
+}
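For illustration, the new builder reads roughly as follows at the call sites updated later in this diff (path and fee values invented):

    let args = ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage)
        .skip_last(false)
        .with_expected_extra_fees(vec![1_000]);
    let total_fee_msat = pass_claimed_payment_along_route(args);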
+
+pub fn pass_claimed_payment_along_route<'a, 'b, 'c, 'd>(args: ClaimAlongRouteArgs) -> u64 {
+       let ClaimAlongRouteArgs {
+               origin_node, expected_paths, expected_extra_fees, expected_min_htlc_overpay, skip_last,
+               payment_preimage: our_payment_preimage
+       } = args;
        let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
        assert_eq!(claim_event.len(), 1);
        match claim_event[0] {
@@ -2664,8 +2703,17 @@ pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, '
                                                        channel.context().config().forwarding_fee_base_msat
                                                }
                                        };
-                                       if $idx == 1 { fee += expected_extra_fees[i]; }
-                                       expect_payment_forwarded!(*$node, $next_node, $prev_node, Some(fee as u64), false, false);
+
+                                       let mut expected_extra_fee = None;
+                                       if $idx == 1 {
+                                               fee += expected_extra_fees[i];
+                                               fee += expected_min_htlc_overpay[i];
+                                               expected_extra_fee = if expected_extra_fees[i] > 0 { Some(expected_extra_fees[i] as u64) } else { None };
+                                       }
+                                       let mut events = $node.node.get_and_clear_pending_events();
+                                       assert_eq!(events.len(), 1);
+                                       expect_payment_forwarded(events.pop().unwrap(), *$node, $next_node, $prev_node,
+                                               Some(fee as u64), expected_extra_fee, false, false);
                                        expected_total_fee_msat += fee as u64;
                                        check_added_monitors!($node, 1);
                                        let new_next_msgs = if $new_msgs {
index 2f43056fb005e9312bedaee228b449bd7238845f..9ba71edde39ab487ae319d7348ecdba17ddfb188 100644 (file)
@@ -871,8 +871,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -985,8 +985,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1104,17 +1104,17 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2889,8 +2889,10 @@ fn test_htlc_on_chain_success() {
        }
        let chan_id = Some(chan_1.2);
        match forwarded_events[1] {
-               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
-                       assert_eq!(fee_earned_msat, Some(1000));
+               Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+                       next_channel_id, outbound_amount_forwarded_msat, ..
+               } => {
+                       assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@ -2899,8 +2901,10 @@ fn test_htlc_on_chain_success() {
                _ => panic!()
        }
        match forwarded_events[2] {
-               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
-                       assert_eq!(fee_earned_msat, Some(1000));
+               Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+                       next_channel_id, outbound_amount_forwarded_msat, ..
+               } => {
+                       assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@ -3695,7 +3699,7 @@ fn test_dup_events_on_peer_disconnect() {
 #[test]
 fn test_peer_disconnected_before_funding_broadcasted() {
        // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
-       // before the funding transaction has been broadcasted.
+       // before the funding transaction has been broadcasted and the peer does not reconnect in time.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -3724,12 +3728,19 @@ fn test_peer_disconnected_before_funding_broadcasted() {
                assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
        }
 
-       // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
-       // disconnected before the funding transaction was broadcasted.
+       // The peers disconnect before the funding is broadcasted.
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
+       // The time for peers to reconnect expires.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       // Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
+       // when the peers are disconnected and do not reconnect before the funding
+       // transaction is broadcasted.
+       check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true
                , [nodes[1].node.get_our_node_id()], 1000000);
        check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
                , [nodes[0].node.get_our_node_id()], 1000000);
@@ -4912,8 +4923,10 @@ fn test_onchain_to_onchain_claim() {
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
-                       assert_eq!(fee_earned_msat, Some(1000));
+               Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+                       next_channel_id, outbound_amount_forwarded_msat, ..
+               } => {
+                       assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, Some(chan_1.2));
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@ -5619,7 +5632,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5627,7 +5640,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -8684,7 +8697,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        check_added_monitors!(nodes[0], 0);
 
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-       let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
        check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
@@ -9028,7 +9041,7 @@ fn test_peer_funding_sidechannel() {
        check_added_monitors!(nodes[1], 1);
        expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
        let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
-       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]);
+       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
 
        let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
@@ -9089,7 +9102,7 @@ fn test_duplicate_funding_err_in_funding() {
 
        let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
        let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
 
        nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
        let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -9181,7 +9194,7 @@ fn test_duplicate_chan_id() {
        let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
 
        let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
-       let channel_id = funding_outpoint.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
 
        // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
        // temporary one).
@@ -10506,6 +10519,90 @@ fn test_remove_expired_inbound_unfunded_channels() {
        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
 }
 
+#[test]
+fn test_channel_close_when_not_timely_accepted() {
+       // Create network of two nodes
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Simulate a peer disconnect mid-handshake: the channel is initiated from
+       // node 0's side, but the nodes disconnect before node 1 can send
+       // accept_channel.
+       let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+       // Make sure we have not immediately removed the OutboundV1Channel from node[0].
+       assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+       // Since the channel was inbound from node[1]'s perspective, it should have been dropped immediately.
+       assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // Meanwhile, enough time passes for the unfunded channel to age out.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       // Since we disconnected from the peer and did not reconnect in time,
+       // we should have force-closed the channel by now.
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       assert_eq!(nodes[0].node.list_channels().len(), 0);
+
+       {
+               // Since the accept_channel message was never received, the channel
+               // should have been force-closed by now from node 0's side, and the
+               // peer removed from per_peer_state.
+               let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+               assert_eq!(node_0_per_peer_state.len(), 0);
+       }
+}
+
+#[test]
+fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
+       // Create network of two nodes
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Simulate a peer disconnect mid-handshake: the channel is initiated from
+       // node 0's side, but the nodes disconnect before node 1 can send
+       // accept_channel.
+       let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+       // Make sure we have not immediately removed the OutboundV1Channel from node[0].
+       assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+       // Since the channel was inbound from node[1]'s perspective, it should have been dropped immediately.
+       assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // The peers now reconnect
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
+
+       // Make sure the SendOpenChannel message is added to node 0's pending message events.
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match &msg_events[0] {
+               MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
+               _ => panic!("Unexpected message."),
+       }
+}
+
 fn do_test_multi_post_event_actions(do_reload: bool) {
        // Tests handling multiple post-Event actions at once.
        // There is specific code in ChannelManager to handle channels where multiple post-Event
@@ -10635,7 +10732,7 @@ fn test_batch_channel_open() {
 
        // Complete the persistence of the monitor.
        nodes[0].chain_monitor.complete_sole_pending_chan_update(
-               &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+               &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
        );
        let events = nodes[0].node.get_and_clear_pending_events();
 
@@ -10662,7 +10759,9 @@ fn test_batch_channel_open() {
 }
 
 #[test]
-fn test_disconnect_in_funding_batch() {
+fn test_close_in_funding_batch() {
+       // This test ensures that if one of the channels in a batch closes,
+       // the entire batch will close.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -10686,14 +10785,39 @@ fn test_disconnect_in_funding_batch() {
        // The transaction should not have been broadcast before all channels are ready.
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
-       // The remaining peer in the batch disconnects.
-       nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
-
-       // The channels in the batch will close immediately.
+       // Force-close the channel for which we've completed the initial monitor.
        let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
-       let channel_id_1 = funding_txo_1.to_channel_id();
-       let channel_id_2 = funding_txo_2.to_channel_id();
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+       let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
+
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+
+       // The monitor should become closed.
+       check_added_monitors(&nodes[0], 1);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { .. } => (),
+               _ => panic!("Unexpected message."),
+       }
+
+       // We broadcast the commitment transaction as part of the force-close.
+       {
+               let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(broadcasted_txs.len(), 1);
+               assert!(broadcasted_txs[0].txid() != tx.txid());
+               assert_eq!(broadcasted_txs[0].input.len(), 1);
+               assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+       }
+
+       // All channels in the batch should close immediately.
        check_closed_events(&nodes[0], &[
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
@@ -10711,19 +10835,6 @@ fn test_disconnect_in_funding_batch() {
                },
        ]);
 
-       // The monitor should become closed.
-       check_added_monitors(&nodes[0], 1);
-       {
-               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
-               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
-               assert_eq!(monitor_updates_1.len(), 1);
-               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
-       }
-
-       // The funding transaction should not have been broadcast, and therefore, we don't need
-       // to broadcast a force-close transaction for the closed monitor.
-       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
-
        // Ensure the channels don't exist anymore.
        assert!(nodes[0].node.list_channels().is_empty());
 }
@@ -10766,8 +10877,8 @@ fn test_batch_funding_close_after_funding_signed() {
        // Force-close the channel for which we've completed the initial monitor.
        let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
-       let channel_id_1 = funding_txo_1.to_channel_id();
-       let channel_id_2 = funding_txo_2.to_channel_id();
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+       let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
        nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
        check_added_monitors(&nodes[0], 2);
        {
@@ -10827,7 +10938,7 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
-       let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
+       let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
 
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
index b62701814399d99240354d1200908b61732e99ae..275b28e4c515ec69632e6218d320c3753f546b23 100644 (file)
@@ -15,7 +15,7 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
 use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::ln::channel;
+use crate::ln::{channel, ChannelId};
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields};
 use crate::ln::msgs::ChannelMessageHandler;
 use crate::util::config::UserConfig;
@@ -176,7 +176,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) {
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
 
        let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
        let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_id);
@@ -257,8 +257,8 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) {
                spendable_outputs_b
        );
 
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
 }
 
 #[test]
@@ -327,7 +327,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
 
        // This HTLC is immediately claimed, giving node B the preimage
        let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
@@ -1121,7 +1121,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
 
        // We create five HTLCs for B to claim against A's revoked commitment transaction:
        //
@@ -1403,7 +1403,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) {
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 12_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
 
        let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
        let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
@@ -1705,7 +1705,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) {
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
 
        // We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success
        // transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated
index 0db815a708532f590d5d7821d2f0bc156e03ad8f..09c022a4c87b1f3e8d106126fde4fd212c22b75a 100644 (file)
@@ -277,10 +277,12 @@ fn mpp_retry_overpay() {
 
        // Can't use claim_payment_along_route as it doesn't support overpayment, so we break out the
        // individual steps here.
+       nodes[3].node.claim_funds(payment_preimage);
        let extra_fees = vec![0, total_overpaid_amount];
-       let expected_total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees(
-               &nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], &extra_fees[..], false,
-               payment_preimage);
+       let expected_route = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+       let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_route[..], payment_preimage)
+               .with_expected_min_htlc_overpay(extra_fees);
+       let expected_total_fee_msat = pass_claimed_payment_along_route(args);
        expect_payment_sent!(&nodes[0], payment_preimage, Some(expected_total_fee_msat));
 }
 
@@ -2155,9 +2157,10 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
        let mut expected_paths = Vec::new();
        for _ in 0..num_mpp_parts { expected_paths_vecs.push(vec!(&nodes[1], &nodes[2])); }
        for i in 0..num_mpp_parts { expected_paths.push(&expected_paths_vecs[i][..]); }
-       let total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees(
-               &nodes[0], &expected_paths[..], &vec![skimmed_fee_msat as u32; num_mpp_parts][..], false,
-               payment_preimage);
+       expected_paths[0].last().unwrap().node.claim_funds(payment_preimage);
+       let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_paths[..], payment_preimage)
+               .with_expected_extra_fees(vec![skimmed_fee_msat as u32; num_mpp_parts]);
+       let total_fee_msat = pass_claimed_payment_along_route(args);
        // The sender doesn't know that the penultimate hop took an extra fee.
        expect_payment_sent(&nodes[0], payment_preimage,
                Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true, true);
@@ -3722,7 +3725,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) {
        match (known_tlvs, even_tlvs) {
                (true, _) => {
                        nodes[1].node.claim_funds_with_known_custom_tlvs(our_payment_preimage);
-                       let expected_total_fee_msat = pass_claimed_payment_along_route(&nodes[0], &[&[&nodes[1]]], &[0; 1], false, our_payment_preimage);
+                       let expected_total_fee_msat = pass_claimed_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], our_payment_preimage));
                        expect_payment_sent!(&nodes[0], our_payment_preimage, Some(expected_total_fee_msat));
                },
                (false, false) => {
index ae3f9c69035a22283bf523ad79313808de827f31..3e07f50b203a0e6c1125ed7e257e376a1d68eb7f 100644 (file)
@@ -2006,7 +2006,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id,
-                                                                       log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+                                                                       ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index));
                                                        // TODO: If the peer is gone we should generate a DiscardFunding event
                                                        // indicating to the wallet that they should just throw away this funding transaction
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
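
The removed log_funding_channel_id macro reconstructed the channel id from txid plus output index; ChannelId::v1_from_funding_txid now does this directly. Per BOLT 2, a v1 channel id is the funding txid with the 16-bit funding output index XORed into its last two bytes. A self-contained sketch of that derivation (the helper name here is ours, not LDK's):

    // BOLT 2 v1 channel_id: XOR the funding output index into the last
    // two bytes of the funding transaction id.
    fn v1_channel_id(funding_txid: [u8; 32], funding_output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        id[30] ^= (funding_output_index >> 8) as u8;
        id[31] ^= (funding_output_index & 0xff) as u8;
        id
    }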
index 85e757204fe5205c7534f614a71c1085535e25c5..1bdebafdc6fe2ae407d447b6147774e142af4b87 100644 (file)
@@ -18,7 +18,7 @@ use crate::ln::channelmanager::{MIN_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnion
 use crate::routing::gossip::RoutingFees;
 use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop};
 use crate::ln::features::ChannelTypeFeatures;
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
 use crate::ln::wire::Encode;
 use crate::util::config::{UserConfig, MaxDustHTLCExposure};
@@ -617,7 +617,7 @@ fn test_0conf_channel_with_async_monitor() {
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-       let channel_id = funding_output.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(funding_output);
        nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id);
        expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
index 1ac290383a48761fa1e892bbc01fc73685aad983..76dd78ae2ad6a712560c5d6d6aa1a1c0b2e182e5 100644 (file)
@@ -16,7 +16,7 @@ use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
@@ -201,7 +201,7 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized =
-               get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
+               get_monitor!(nodes[0], ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 })).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
@@ -446,7 +446,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        assert!(nodes_0_read.is_empty());
 
        for monitor in node_0_monitors.drain(..) {
-               assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+               let funding_outpoint = monitor.get_funding_txo().0;
+               assert_eq!(nodes[0].chain_monitor.watch_channel(funding_outpoint, monitor),
                        Ok(ChannelMonitorUpdateStatus::Completed));
                check_added_monitors!(nodes[0], 1);
        }
@@ -829,8 +830,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        // monitors and ChannelManager, for use later, if we don't want to persist both monitors.
        let mut original_monitor = test_utils::TestVecWriter(Vec::new());
        if !persist_both_monitors {
-               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
-                       if outpoint.to_channel_id() == chan_id_not_persisted {
+               for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if channel_id == chan_id_not_persisted {
                                assert!(original_monitor.0.is_empty());
                                nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
                        }
@@ -849,16 +850,16 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
        // with the old ChannelManager.
        let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
-       for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
-               if outpoint.to_channel_id() == chan_id_persisted {
+       for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+               if channel_id == chan_id_persisted {
                        assert!(updated_monitor.0.is_empty());
                        nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
                }
        }
        // If `persist_both_monitors` is set, get the second monitor here as well
        if persist_both_monitors {
-               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
-                       if outpoint.to_channel_id() == chan_id_not_persisted {
+               for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if channel_id == chan_id_not_persisted {
                                assert!(original_monitor.0.is_empty());
                                nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
                        }
@@ -1220,7 +1221,7 @@ fn test_reload_partial_funding_batch() {
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
        // Reload the node while a subset of the channels in the funding batch have persisted monitors.
-       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 });
        let node_encoded = nodes[0].node.encode();
        let channel_monitor_1_serialized = get_monitor!(nodes[0], channel_id_1).encode();
        reload_node!(nodes[0], node_encoded, &[&channel_monitor_1_serialized], new_persister, new_chain_monitor, new_channel_manager);
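
Throughout these reload tests, list_monitors() now yields (OutPoint, ChannelId) pairs rather than bare outpoints, so callers match on the channel id and keep the outpoint for the get_monitor() lookup. A sketch of that filtering, assuming the LDK types named in this diff:

    use lightning::chain::transaction::OutPoint;
    use lightning::ln::ChannelId;

    // Find the funding outpoint backing a given channel id among the
    // monitored channels.
    fn monitor_outpoint_for(
        monitors: &[(OutPoint, ChannelId)], target: &ChannelId,
    ) -> Option<OutPoint> {
        monitors.iter().find(|(_, id)| id == target).map(|(outpoint, _)| *outpoint)
    }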
index cce012aa99203edf4f5b327f22d9cc754226a62e..a31d9520dad392d94f4956e0f8546926cb917668 100644 (file)
@@ -25,7 +25,7 @@ use bitcoin::secp256k1::Secp256k1;
 
 use crate::prelude::*;
 
-use crate::ln::functional_test_utils::*;
+use crate::ln::{functional_test_utils::*, ChannelId};
 
 fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        // Our on-chain HTLC-claim learning has a few properties worth testing:
@@ -531,7 +531,7 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
 
        let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
        let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);
index ef9620fd295fc4de23dbdd1306744b787cf4436e..61d2233ef48ba18dba6a15e95d405bde2cb47e49 100644 (file)
@@ -51,7 +51,7 @@ fn pre_funding_lock_shutdown_test() {
        mine_transaction(&nodes[0], &tx);
        mine_transaction(&nodes[1], &tx);
 
-       nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.close_channel(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@@ -68,8 +68,8 @@ fn pre_funding_lock_shutdown_test() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[1].node.list_channels().is_empty());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 8000000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 8000000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 8000000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 8000000);
 }
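
The shutdown_tests hunks from here on are one mechanical substitution: ClosureReason::CooperativeClosure is split into LocallyInitiatedCooperativeClosure and CounterpartyInitiatedCooperativeClosure, so each side of a cooperative close now records which peer sent the first shutdown message. A sketch of how an event consumer might distinguish them, assuming only the new variants introduced in this diff:

    use lightning::events::ClosureReason;

    // Map a closure reason to a human-readable summary; the wildcard arm
    // covers force-closes and other non-cooperative paths.
    fn describe_close(reason: &ClosureReason) -> &'static str {
        match reason {
            ClosureReason::LocallyInitiatedCooperativeClosure => "we initiated the co-op close",
            ClosureReason::CounterpartyInitiatedCooperativeClosure => "peer initiated the co-op close",
            _ => "non-cooperative close",
        }
    }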
 
 #[test]
@@ -113,8 +113,8 @@ fn expect_channel_shutdown_state() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[1].node.list_channels().is_empty());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -196,8 +196,8 @@ fn expect_channel_shutdown_state_with_htlc() {
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
        let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        assert!(node_1_none.is_none());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 
        // Shutdown basically removes the ChannelDetails, so testing the ShutdownComplete state is unnecessary
        assert!(nodes[0].node.list_channels().is_empty());
@@ -251,8 +251,8 @@ fn test_lnd_bug_6039() {
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
        let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        assert!(node_1_none.is_none());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 
        // Shutdown basically removes the ChannelDetails, so testing the ShutdownComplete state is unnecessary
        assert!(nodes[0].node.list_channels().is_empty());
@@ -404,8 +404,8 @@ fn updates_shutdown_wait() {
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
        let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        assert!(node_1_none.is_none());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 
        assert!(nodes[0].node.list_channels().is_empty());
 
@@ -414,8 +414,8 @@ fn updates_shutdown_wait() {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -509,9 +509,27 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 2, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id(), nodes[2].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       let event1 = ExpectedCloseEvent {
+               channel_capacity_sats: Some(100000),
+               channel_id: None,
+               counterparty_node_id: Some(nodes[0].node.get_our_node_id()),
+               discard_funding: false,
+               reason: Some(ClosureReason::LocallyInitiatedCooperativeClosure),
+               channel_funding_txo: None,
+               user_channel_id: None,
+       };
+       let event2 = ExpectedCloseEvent {
+               channel_capacity_sats: Some(100000),
+               channel_id: None,
+               counterparty_node_id: Some(nodes[2].node.get_our_node_id()),
+               discard_funding: false,
+               reason: Some(ClosureReason::CounterpartyInitiatedCooperativeClosure),
+               channel_funding_txo: None,
+               user_channel_id: None,
+       };
+       check_closed_events(&nodes[1], &[event1, event2]);
+       check_closed_event!(nodes[2], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_test_shutdown_rebroadcast(recv_count: u8) {
@@ -652,7 +670,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
                let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
                assert!(node_1_none.is_none());
-               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        } else {
                // If one node, however, received + responded with an identical closing_signed we end
                // up erroring and node[0] will try to broadcast its own latest commitment transaction.
@@ -692,9 +710,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -721,7 +739,7 @@ fn test_upfront_shutdown_script() {
 
        // We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
-       nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
        let node_0_orig_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
        let mut node_0_shutdown = node_0_orig_shutdown.clone();
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
@@ -736,7 +754,7 @@ fn test_upfront_shutdown_script() {
 
        // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
-       nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
        // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
        nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
@@ -750,7 +768,7 @@ fn test_upfront_shutdown_script() {
        // We test that in case of peer non-signaling we don't enforce the committed script at channel opening
        *nodes[0].override_init_features.borrow_mut() = Some(nodes[0].node.init_features().clear_upfront_shutdown_script());
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
-       nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
        check_added_monitors!(nodes[1], 1);
@@ -765,7 +783,7 @@ fn test_upfront_shutdown_script() {
        // channel smoothly, opt-out is from channel initiator here
        *nodes[0].override_init_features.borrow_mut() = None;
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000);
-       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
        let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
@@ -779,7 +797,7 @@ fn test_upfront_shutdown_script() {
        //// We test that if the user opts out, we provide a zero-length script at channel opening and we are able to
        //// close the channel smoothly
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
-       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
        let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
@@ -894,7 +912,7 @@ fn test_segwit_v0_shutdown_script() {
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
 
        // Use a segwit v0 script supported even without option_shutdown_anysegwit
@@ -929,7 +947,7 @@ fn test_anysegwit_shutdown_script() {
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
 
        // Use a non-v0 segwit script supported by option_shutdown_anysegwit
@@ -975,14 +993,14 @@ fn test_unsupported_anysegwit_shutdown_script() {
                .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
 
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-       match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()) {
+       match nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()) {
                Err(APIError::IncompatibleShutdownScript { script }) => {
                        assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner());
                },
                Err(e) => panic!("Unexpected error: {:?}", e),
                Ok(_) => panic!("Expected error"),
        }
-       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
 
        // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
@@ -1007,7 +1025,7 @@ fn test_invalid_shutdown_script() {
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
 
        // Use a segwit v0 script with an unsupported witness program
@@ -1041,7 +1059,7 @@ fn test_user_shutdown_script() {
        let shutdown_script = ShutdownScript::try_from(script.clone()).unwrap();
 
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-       nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
+       nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
        check_added_monitors!(nodes[1], 1);
 
        let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@@ -1068,7 +1086,7 @@ fn test_already_set_user_shutdown_script() {
        let shutdown_script = ShutdownScript::try_from(script).unwrap();
 
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-       let result = nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
+       let result = nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
 
        assert_eq!(result, Err(APIError::APIMisuseError { err: "Cannot override shutdown script for a channel with one already set".to_string() }));
 }
@@ -1135,9 +1153,9 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
                if timeout_step == TimeoutStep::NoTimeout {
                        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap());
-                       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
                }
-               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        }
 
        if timeout_step != TimeoutStep::NoTimeout {
@@ -1200,7 +1218,7 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
                *feerate_lock *= 10;
        }
 
-       nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@@ -1221,8 +1239,8 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1240,7 +1258,7 @@ fn simple_target_feerate_shutdown() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-       let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id();
+       let chan_id = chan.2;
 
        nodes[0].node.close_channel_with_feerate_and_script(&chan_id, &nodes[1].node.get_our_node_id(), Some(253 * 10), None).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -1276,8 +1294,8 @@ fn simple_target_feerate_shutdown() {
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_outbound_update_no_early_closing_signed(use_htlc: bool) {
@@ -1369,8 +1387,8 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) {
        let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        assert!(node_1_none.is_none());
 
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
index bad3ae96bbaa80c01a8a60e05dc3f8fde7bdf06d..b2d4af740b1bed743d999e54b36e8a3b0ab7159b 100644 (file)
@@ -813,8 +813,8 @@ where
                }
        }
 
-       #[cfg(test)]
-       pub(super) fn send_onion_message_using_path<T: OnionMessageContents>(
+       #[cfg(any(test, feature = "_test_utils"))]
+       pub fn send_onion_message_using_path<T: OnionMessageContents>(
                &self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>
        ) -> Result<SendSuccess, SendError> {
                self.enqueue_onion_message(path, contents, reply_path, format_args!(""))
index 9c8fd40af1358f1e1c8ae04646607da68a4e7abe..4c7a3762a278fa1e768af4034de4b5b98f014319 100644 (file)
@@ -1032,13 +1032,13 @@ impl<'a> DirectedChannelInfo<'a> {
        ///
        /// Refers to the `node_id` forwarding the payment to the next hop.
        #[inline]
-       pub(super) fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
+       pub fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
 
        /// Returns the `node_id` of the target hop.
        ///
        /// Refers to the `node_id` receiving the payment from the previous hop.
        #[inline]
-       pub(super) fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
+       pub fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
 }
 
 impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
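
Making source() and target() public lets code outside the routing module read a directed channel's endpoints, which the bindings need. A sketch under the assumption that the caller already holds a DirectedChannelInfo:

    use lightning::routing::gossip::{DirectedChannelInfo, NodeId};

    // source() is the node forwarding the payment on this hop; target()
    // is the node receiving it.
    fn hop_endpoints<'a>(info: &DirectedChannelInfo<'a>) -> (&'a NodeId, &'a NodeId) {
        (info.source(), info.target())
    }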
index 203c544e0096385c959d124f051bdc15295e958e..f962251cd65bd7baf072a9bcbfa0d731d4bfec64 100644 (file)
@@ -7,10 +7,9 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use crate::chain::transaction::OutPoint;
+use crate::ln::ChannelId;
 use crate::sign::SpendableOutputDescriptor;
 
-use bitcoin::hash_types::Txid;
 use bitcoin::blockdata::transaction::Transaction;
 
 use crate::routing::router::Route;
@@ -38,27 +37,17 @@ macro_rules! log_bytes {
        }
 }
 
-pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16);
-impl<'a> core::fmt::Display for DebugFundingChannelId<'a> {
+pub(crate) struct DebugFundingInfo<'a>(pub &'a ChannelId);
+impl<'a> core::fmt::Display for DebugFundingInfo<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-               (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f)
-       }
-}
-macro_rules! log_funding_channel_id {
-       ($funding_txid: expr, $funding_txo: expr) => {
-               $crate::util::macro_logger::DebugFundingChannelId(&$funding_txid, $funding_txo)
-       }
-}
-
-pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T));
-impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> {
-       fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-               (self.0).0.to_channel_id().fmt(f)
+               self.0.fmt(f)
        }
 }
 macro_rules! log_funding_info {
        ($key_storage: expr) => {
-               $crate::util::macro_logger::DebugFundingInfo(&$key_storage.get_funding_txo())
+               $crate::util::macro_logger::DebugFundingInfo(
+                       &$key_storage.channel_id()
+               )
        }
 }
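
DebugFundingInfo shrinks to a newtype over a ChannelId reference and simply forwards Display, dropping the txid-plus-index reconstruction now that monitors know their channel id. The underlying zero-cost pattern, self-contained:

    // Newtype whose Display renders the wrapped bytes as hex; logger
    // macros build such wrappers only when a line is actually emitted.
    struct DebugId<'a>(&'a [u8; 32]);

    impl<'a> core::fmt::Display for DebugId<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
            for byte in self.0 {
                write!(f, "{:02x}", byte)?;
            }
            Ok(())
        }
    }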
 
index f2116032786ce9c6bbed6de0c4e1d82883874ba0..6ce00acab451e1aa00b3a541c7392e22be63ca50 100644 (file)
@@ -20,6 +20,7 @@ pub mod ser;
 pub mod message_signing;
 pub mod invoice;
 pub mod persist;
+pub mod scid_utils;
 pub mod string;
 pub mod wakers;
 #[cfg(fuzzing)]
@@ -30,7 +31,6 @@ pub(crate) mod base32;
 pub(crate) mod atomic_counter;
 pub(crate) mod byte_utils;
 pub(crate) mod transaction_utils;
-pub(crate) mod scid_utils;
 pub(crate) mod time;
 
 pub mod indexed_map;
index e63290620516ee0b6f62b7bb57c0c6d2b9369d49..7d501345c3ce0d9a2c9c717ae41843b5fb79d79e 100644 (file)
@@ -1052,9 +1052,9 @@ mod tests {
                {
                        let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                        let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-                       let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+                       let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
                        let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
-                       let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
+                       let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
                        let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
 
                        let ro_persister = MonitorUpdatingPersister {
index 45b24fd14b042e563e7d64f9dc978684f21eab3a..9d943b4d466e5af801607d64f5687e4395588bb9 100644 (file)
@@ -7,6 +7,8 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
+//! Utilities for creating and parsing short channel ids.
+
 /// Maximum block height that can be used in a `short_channel_id`. This
 /// value is based on the 3-bytes available for block height.
 pub const MAX_SCID_BLOCK: u64 = 0x00ffffff;
@@ -22,8 +24,11 @@ pub const MAX_SCID_VOUT_INDEX: u64 = 0xffff;
 /// A `short_channel_id` construction error
 #[derive(Debug, PartialEq, Eq)]
 pub enum ShortChannelIdError {
+       /// Block height too high
        BlockOverflow,
+       /// Tx index too high
        TxIndexOverflow,
+       /// Vout index too high
        VoutIndexOverflow,
 }
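
With scid_utils now public, its constants and error variants gain doc comments. For context: a short_channel_id packs three fields into a u64, 3 bytes of block height, 3 bytes of transaction index, and 2 bytes of output index, which is exactly what the three overflow variants above guard against. A sketch of the packing (helper name ours):

    // Pack (block, tx_index, vout) into a BOLT 7 short_channel_id,
    // rejecting values that overflow their 3-, 3-, and 2-byte fields.
    fn pack_scid(block: u64, tx_index: u64, vout: u64) -> Option<u64> {
        if block > 0x00ff_ffff || tx_index > 0x00ff_ffff || vout > 0xffff {
            return None;
        }
        Some((block << 40) | (tx_index << 16) | vout)
    }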
 
@@ -91,8 +96,11 @@ pub(crate) mod fake_scid {
        /// into the fake scid.
        #[derive(Copy, Clone)]
        pub(crate) enum Namespace {
+               /// Phantom nodes namespace
                Phantom,
+               /// SCID aliases for outbound private channels
                OutboundAlias,
+               /// Payment interception namespace
                Intercept
        }
 
index e5489ac8a2e1a85dfaa048ecf458ce8aa5ea8f10..d77575d90b26dd709e71c3fc931a5d08f35b0338 100644 (file)
@@ -340,7 +340,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == monitor);
-               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+               self.latest_monitor_update_id.lock().unwrap().insert(monitor.channel_id(),
                        (funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
                self.added_monitors.lock().unwrap().push((funding_txo, monitor));
                self.chain_monitor.watch_channel(funding_txo, new_monitor)
@@ -352,18 +352,19 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
                update.write(&mut w).unwrap();
                assert!(channelmonitor::ChannelMonitorUpdate::read(
                                &mut io::Cursor::new(&w.0)).unwrap() == *update);
+               let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
 
-               self.monitor_updates.lock().unwrap().entry(funding_txo.to_channel_id()).or_insert(Vec::new()).push(update.clone());
+               self.monitor_updates.lock().unwrap().entry(channel_id).or_insert(Vec::new()).push(update.clone());
 
                if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
-                       assert_eq!(funding_txo.to_channel_id(), exp.0);
+                       assert_eq!(channel_id, exp.0);
                        assert_eq!(update.updates.len(), 1);
                        if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                assert_eq!(should_broadcast, exp.1);
                        } else { panic!(); }
                }
 
-               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+               self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
                        (funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update)));
                let update_res = self.chain_monitor.update_channel(funding_txo, update);
                // At every point where we get a monitor update, we should be able to send a useful monitor
@@ -374,7 +375,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                if let Some(chan_id) = self.expect_monitor_round_trip_fail.lock().unwrap().take() {
-                       assert_eq!(chan_id, funding_txo.to_channel_id());
+                       assert_eq!(chan_id, channel_id);
                        assert!(new_monitor != *monitor);
                } else {
                        assert!(new_monitor == *monitor);
@@ -383,7 +384,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
                update_res
        }
 
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
 }
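
The test watcher now keys its bookkeeping maps by ChannelId, falling back to the funding-outpoint-derived v1 id when a ChannelMonitorUpdate predates the persisted channel_id field. That fallback in isolation, assuming the LDK types named in this diff:

    use lightning::chain::transaction::OutPoint;
    use lightning::ln::ChannelId;

    // Older serialized updates carry no channel_id; derive the v1 id from
    // the funding outpoint so map keys stay stable across versions.
    fn effective_channel_id(stored: Option<ChannelId>, funding_txo: OutPoint) -> ChannelId {
        stored.unwrap_or_else(|| ChannelId::v1_from_funding_outpoint(funding_txo))
    }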