Merge pull request #2430 from TheBlueMatt/2023-07-116-bindings-part-1
author	Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
	Thu, 20 Jul 2023 21:42:44 +0000 (21:42 +0000)
committer	GitHub <noreply@github.com>
	Thu, 20 Jul 2023 21:42:44 +0000 (21:42 +0000)
Assorted 0.0.116 Bindings updates

15 files changed:
Cargo.toml
ci/ci-tests.sh
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning/src/chain/chaininterface.rs
lightning/src/chain/onchaintx.rs
lightning/src/events/mod.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/util/logger.rs
lightning/src/util/macro_logger.rs
msrv-no-dev-deps-check/Cargo.toml [new file with mode: 0644]
msrv-no-dev-deps-check/src/lib.rs [new file with mode: 0644]

index a3acccfdaea91614b98421590dd567d1a678ef58..8614cb48c1f15207023d9fd00b952600804d4e1b 100644 (file)
@@ -14,6 +14,7 @@ exclude = [
     "lightning-custom-message",
     "lightning-transaction-sync",
     "no-std-check",
+    "msrv-no-dev-deps-check",
     "bench",
 ]
 
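Because the new crate sits in the workspace `exclude` list, it is not a workspace member: built standalone, it resolves and locks its dependencies independently, which is what lets CI apply only the release pins to it (see `ci/ci-tests.sh` below).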
index e8137380705e1d33da8f6d090b8987853907b2d6..7a16b749200de77219c7268eb79fe793fe0d1bfb 100755 (executable)
@@ -4,18 +4,33 @@ set -eox pipefail
 RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }')
 HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')"
 
-# Tokio MSRV on versions 1.17 through 1.26 is rustc 1.49. Above 1.26 MSRV is 1.56.
-[ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.1" --verbose
-[[ "$RUSTC_MINOR_VERSION" -gt 48  &&  "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.25.1" --verbose
+# Some crates require pinning to meet our MSRV even for our downstream users,
+# which we do here.
+# Further crates which appear only as dev-dependencies are pinned further down.
+function PIN_RELEASE_DEPS {
+       # Tokio MSRV on versions 1.17 through 1.26 is rustc 1.49. Above 1.26 MSRV is 1.56.
+       [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.1" --verbose
+       [[ "$RUSTC_MINOR_VERSION" -gt 48  &&  "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.25.1" --verbose
 
-# Sadly the log crate is always a dependency of tokio until 1.20, and has no reasonable MSRV guarantees
-[ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p log --precise "0.4.18" --verbose
+       # Sadly the log crate is always a dependency of tokio until 1.20, and has no reasonable MSRV guarantees
+       [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p log --precise "0.4.18" --verbose
+
+       # The serde_json crate switched to Rust edition 2021 starting with v1.0.101, i.e., has MSRV of 1.56
+       [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p serde_json --precise "1.0.100" --verbose
+
+       return 0 # Don't fail the script if our rustc is higher than the last check
+}
+
+PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
 
 # The addr2line v0.20 crate (a dependency of `backtrace` starting with 0.3.68) relies on 1.55+
 [ "$RUSTC_MINOR_VERSION" -lt 55 ] && cargo update -p backtrace --precise "0.3.67" --verbose
 
-# The serde_json crate switched to Rust edition 2021 starting with v1.0.101, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p serde_json --precise "1.0.100" --verbose
+# The quote crate switched to Rust edition 2021 starting with v1.0.31, i.e., has MSRV of 1.56
+[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p quote --precise "1.0.30" --verbose
+
+# The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
+[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
 
 [ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
 
@@ -62,7 +77,18 @@ popd
 echo -e "\n\nTesting no-std build on a downstream no-std crate"
 # check no-std compatibility across dependencies
 pushd no-std-check
-cargo check --verbose --color always --features lightning-transaction-sync
+if [[ $RUSTC_MINOR_VERSION -gt 67 ]]; then
+       # lightning-transaction-sync's MSRV is 1.67
+       cargo check --verbose --color always --features lightning-transaction-sync
+else
+       cargo check --verbose --color always
+fi
+popd
+
+# Test that we can build downstream code with only the "release pins".
+pushd msrv-no-dev-deps-check
+PIN_RELEASE_DEPS
+cargo check
 popd
 
 if [ -f "$(which arm-none-eabi-gcc)" ]; then
index 202849bee49af822bdc47d7045e80734cfa59332..0e5b285993b47966f2b0e69817549fc51672a4a1 100644 (file)
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.116-rc1", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
+tokio = { version = "1.0", features = [ "io-util", "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
 tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
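Note that the `macros` feature is kept in the `tokio` dev-dependency above (the tests presumably still rely on it) but dropped from the release dependency, so downstream MSRV builds no longer pull in tokio's proc-macro tree (`quote`/`proc-macro2`, pinned earlier in CI). The hand-rolled selectors added to `lib.rs` below replace the library's one use of `tokio::select!`.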
index 724a57fb15c19bb1ba9c9c09bd3fbe44fc3a6478..d6d8004164bf9ab54db94ee64b55ef63f5f57e74 100644 (file)
@@ -42,16 +42,79 @@ use lightning::ln::peer_handler::APeerManager;
 use lightning::ln::msgs::NetAddress;
 
 use std::ops::Deref;
-use std::task;
+use std::task::{self, Poll};
+use std::future::Future;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::time::Duration;
+use std::pin::Pin;
 use std::hash::Hash;
 
 static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 
+// We only need to select over multiple futures in one place, and taking on the full `tokio/macros`
+// dependency tree in order to do so (which has broken our MSRV before) is excessive. Instead, we
+// define trivial two- and three-future selectors with the specific types we need and just use those.
+
+pub(crate) enum SelectorOutput {
+       A(Option<()>), B(Option<()>), C(tokio::io::Result<usize>),
+}
+
+pub(crate) struct TwoSelector<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> {
+       pub a: A,
+       pub b: B,
+}
+
+impl<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> Future for TwoSelector<A, B> {
+       type Output = SelectorOutput;
+       fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+               match Pin::new(&mut self.a).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+                       Poll::Pending => {},
+               }
+               match Pin::new(&mut self.b).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+                       Poll::Pending => {},
+               }
+               Poll::Pending
+       }
+}
+
+pub(crate) struct ThreeSelector<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+> {
+       pub a: A,
+       pub b: B,
+       pub c: C,
+}
+
+impl<
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+> Future for ThreeSelector<A, B, C> {
+       type Output = SelectorOutput;
+       fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+               match Pin::new(&mut self.a).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+                       Poll::Pending => {},
+               }
+               match Pin::new(&mut self.b).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+                       Poll::Pending => {},
+               }
+               match Pin::new(&mut self.c).poll(ctx) {
+                       Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
+                       Poll::Pending => {},
+               }
+               Poll::Pending
+       }
+}
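As a quick illustration of how these hand-rolled selectors behave, here is a hedged usage sketch (not part of the patch; it assumes only the `TwoSelector`/`SelectorOutput` types above and `std`):

	// Hypothetical sketch: `a` is already ready, so a single poll of the selector
	// yields SelectorOutput::A; `b` is never completed.
	async fn selector_demo() {
		let output = TwoSelector {
			a: Box::pin(std::future::ready(Some(()))),
			b: Box::pin(std::future::pending::<Option<()>>()),
		}.await;
		match output {
			SelectorOutput::A(v) => assert_eq!(v, Some(())),
			_ => unreachable!("`a` is polled first and was already ready"),
		}
	}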
+
 /// Connection contains all our internal state for a connection - we hold a reference to the
 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
 /// read future (which is returned by schedule_read).
@@ -127,29 +190,44 @@ impl Connection {
                                }
                                us_lock.read_paused
                        };
-                       tokio::select! {
-                               v = write_avail_receiver.recv() => {
+                       // TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support.
+                       let select_result = if read_paused {
+                               TwoSelector {
+                                       a: Box::pin(write_avail_receiver.recv()),
+                                       b: Box::pin(read_wake_receiver.recv()),
+                               }.await
+                       } else {
+                               ThreeSelector {
+                                       a: Box::pin(write_avail_receiver.recv()),
+                                       b: Box::pin(read_wake_receiver.recv()),
+                                       c: Box::pin(reader.read(&mut buf)),
+                               }.await
+                       };
+                       match select_result {
+                               SelectorOutput::A(v) => {
 					assert!(v.is_some()); // We can't have dropped the sending end, it's in the `us` Arc!
                                        if peer_manager.as_ref().write_buffer_space_avail(&mut our_descriptor).is_err() {
                                                break Disconnect::CloseConnection;
                                        }
                                },
-                               _ = read_wake_receiver.recv() => {},
-                               read = reader.read(&mut buf), if !read_paused => match read {
-                                       Ok(0) => break Disconnect::PeerDisconnected,
-                                       Ok(len) => {
-                                               let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
-                                               let mut us_lock = us.lock().unwrap();
-                                               match read_res {
-                                                       Ok(pause_read) => {
-                                                               if pause_read {
-                                                                       us_lock.read_paused = true;
-                                                               }
-                                                       },
-                                                       Err(_) => break Disconnect::CloseConnection,
-                                               }
-                                       },
-                                       Err(_) => break Disconnect::PeerDisconnected,
+                               SelectorOutput::B(_) => {},
+                               SelectorOutput::C(read) => {
+                                       match read {
+                                               Ok(0) => break Disconnect::PeerDisconnected,
+                                               Ok(len) => {
+                                                       let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
+                                                       let mut us_lock = us.lock().unwrap();
+                                                       match read_res {
+                                                               Ok(pause_read) => {
+                                                                       if pause_read {
+                                                                               us_lock.read_paused = true;
+                                                                       }
+                                                               },
+                                                               Err(_) => break Disconnect::CloseConnection,
+                                                       }
+                                               },
+                                               Err(_) => break Disconnect::PeerDisconnected,
+                                       }
                                },
                        }
                        let _ = event_waker.try_send(());
index 6073ed61aaf0cc88beea1c899b687fbb407fbc93..913fa007d378e91ba5666e3d34ae11c15010ce13 100644 (file)
@@ -68,6 +68,11 @@ pub enum ConfirmationTarget {
 /// A trait which should be implemented to provide feerate information on a number of time
 /// horizons.
 ///
+/// If access to a local mempool is not feasible, feerate estimates should be fetched from a set of
+/// third parties hosting them. Note that this enables them to affect the propagation of your
+/// pre-signed transactions at any time and therefore endangers the safety of channel funds. This
+/// tradeoff should be considered carefully when deciding on a deployment.
+///
 /// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
 /// called from inside the library in response to chain events, P2P events, or timer events).
 pub trait FeeEstimator {
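To make the warning concrete, here is a minimal sketch of a conservative implementor (hedged: `StaticFeeEstimator` is hypothetical, and a fixed floor is a stand-in for real estimation, not a recommendation; it assumes the trait's single method `get_est_sat_per_1000_weight`):

	// Hypothetical fallback: return a fixed, conservative feerate rather than
	// trusting a third party. 253 sat/kW is the common minimum relay feerate.
	struct StaticFeeEstimator;
	impl FeeEstimator for StaticFeeEstimator {
		fn get_est_sat_per_1000_weight(&self, _confirmation_target: ConfirmationTarget) -> u32 {
			253
		}
	}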
index 171caf7006e873615e11fd1f8ea0b999d5f24411..6ac4973a74441de88461576d26c9f8b69545d1c8 100644 (file)
@@ -633,11 +633,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                                                .compute_package_feerate(fee_estimator, conf_target, force_feerate_bump);
                                        if let Some(input_amount_sat) = output.funding_amount {
                                                let fee_sat = input_amount_sat - tx.output.iter().map(|output| output.value).sum::<u64>();
-                                               if compute_feerate_sat_per_1000_weight(fee_sat, tx.weight() as u64) >=
-                                                        package_target_feerate_sat_per_1000_weight
-                                               {
-                                                       log_debug!(logger, "Commitment transaction {} already meets required feerate {} sat/kW",
-                                                               tx.txid(), package_target_feerate_sat_per_1000_weight);
+                                               let commitment_tx_feerate_sat_per_1000_weight =
+                                                       compute_feerate_sat_per_1000_weight(fee_sat, tx.weight() as u64);
+                                               if commitment_tx_feerate_sat_per_1000_weight >= package_target_feerate_sat_per_1000_weight {
+                                                       log_debug!(logger, "Pre-signed {} already has feerate {} sat/kW above required {} sat/kW",
+                                                               log_tx!(tx), commitment_tx_feerate_sat_per_1000_weight,
+                                                               package_target_feerate_sat_per_1000_weight);
                                                        return Some((new_timer, 0, OnchainClaim::Tx(tx.clone())));
                                                }
                                        }
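For intuition on the check above, a worked example with hypothetical numbers (assuming `compute_feerate_sat_per_1000_weight` is fee * 1000 / weight):

	// A commitment tx spends a 1,000,000 sat funding output and creates 998,000 sat
	// of outputs, paying 2,000 sat in fees at a weight of 800 weight units.
	let fee_sat = 1_000_000u64 - 998_000u64;     // 2_000 sat
	let feerate = fee_sat * 1000 / 800;          // 2_500 sat per 1000 weight (sat/kW)
	assert!(feerate >= 2_000);                   // meets a 2,000 sat/kW package target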
@@ -1120,7 +1121,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        }
 
 	//TODO: getting the latest holder transactions should be infallible and result in us "force-closing the channel", but we may
-	// have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
+	// have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after OutboundV1Channel::get_funding_created,
 	// before providing an initial commitment transaction. For an outbound channel, the ChannelMonitor is initialized at Channel::funding_signed;
 	// there is nothing to monitor before that.
        pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
index a5a20e7d42b3e016d5ea219f1427cd3dba0e67c2..d08e563cbf6195e4566bae0691b783e947033156 100644 (file)
@@ -113,7 +113,7 @@ impl_writeable_tlv_based_enum_upgradable!(PathFailure,
 );
 
 #[derive(Clone, Debug, PartialEq, Eq)]
-/// The reason the channel was closed. See individual variants more details.
+/// The reason the channel was closed. See individual variants for more details.
 pub enum ClosureReason {
        /// Closure generated from receiving a peer error message.
        ///
@@ -164,7 +164,10 @@ pub enum ClosureReason {
        ///
        /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-       OutdatedChannelManager
+       OutdatedChannelManager,
+       /// The counterparty requested a cooperative close of a channel that had not been funded yet.
+       /// The channel has been immediately closed.
+       CounterpartyCoopClosedUnfundedChannel,
 }
 
 impl core::fmt::Display for ClosureReason {
@@ -184,6 +187,7 @@ impl core::fmt::Display for ClosureReason {
                        },
                        ClosureReason::DisconnectedPeer => f.write_str("the peer disconnected prior to the channel being funded"),
                        ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
+                       ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
                }
        }
 }
@@ -197,6 +201,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
        (8, ProcessingError) => { (1, err, required) },
        (10, DisconnectedPeer) => {},
        (12, OutdatedChannelManager) => {},
+       (13, CounterpartyCoopClosedUnfundedChannel) => {},
 );
 
 /// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
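Downstream, the new variant surfaces via `Event::ChannelClosed`. A hedged handler sketch (assuming only the `ClosureReason` enum above; `log_closure` is a hypothetical helper):

	// The new variant signals a benign close: the channel was never funded, so no
	// funds were ever at risk on-chain.
	fn log_closure(reason: &ClosureReason) {
		match reason {
			ClosureReason::CounterpartyCoopClosedUnfundedChannel =>
				println!("peer closed a channel before it was funded; no funds at risk"),
			other => println!("channel closed: {}", other), // uses the Display impl above
		}
	}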
index 7379c7128e182ee55617fd5cd9fe5491c52ad9a5..e74f659fbd09ba04a6fc537fd9525b3c8f56a7c6 100644 (file)
@@ -590,6 +590,11 @@ pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 
+/// The number of ticks that may elapse after an unfunded outbound/inbound channel is created
+/// before it must be promoted to a [`Channel`]. An unfunded channel exceeding this age limit
+/// will be force-closed and purged from memory.
+pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
+
 struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
 }
@@ -598,6 +603,28 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
 });
 
+/// Contains all state common to unfunded inbound/outbound channels.
+pub(super) struct UnfundedChannelContext {
+	/// A counter tracking how many ticks have elapsed since this unfunded channel was
+	/// created. If the peer has yet to fund the channel after
+	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` ticks, it will be force-closed and purged from memory.
+       ///
+       /// This is so that we don't keep channels around that haven't progressed to a funded state
+       /// in a timely manner.
+       unfunded_channel_age_ticks: usize,
+}
+
+impl UnfundedChannelContext {
+       /// Determines whether we should force-close and purge this unfunded channel from memory due to it
+       /// having reached the unfunded channel age limit.
+       ///
+       /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+       pub fn should_expire_unfunded_channel(&mut self) -> bool {
+               self.unfunded_channel_age_ticks += 1;
+               self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
+       }
+}
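Assuming the usual once-per-minute cadence for `timer_tick_occurred`, 60 ticks gives an unfunded channel roughly an hour to reach the funded state. A test-style sketch of the counter's behavior (hedged; uses only the types above):

	// A fresh context expires exactly on the 60th tick.
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel()); // ticks 1..=59: keep waiting
	}
	assert!(ctx.should_expire_unfunded_channel()); // tick 60: force-close and purge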
+
 /// Contains everything about the channel including state, and various flags.
 pub(super) struct ChannelContext<Signer: ChannelSigner> {
        config: LegacyChannelConfig,
@@ -995,7 +1022,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        }
 
        /// Returns the funding_txo we either got from our peer, or were given by
-       /// get_outbound_funding_created.
+       /// get_funding_created.
        pub fn get_funding_txo(&self) -> Option<OutPoint> {
                self.channel_transaction_parameters.funding_outpoint
        }
@@ -1440,7 +1467,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        #[inline]
        /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
        /// will sign and send to our counterparty.
-       /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+       /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
        fn build_remote_transaction_keys(&self) -> TxCreationKeys {
                //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
                //may see payments to it!
@@ -2026,7 +2053,7 @@ fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_featur
 
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
+// calling channel_id() before we're set up or things like get_funding_signed on an
 // inbound channel.
 //
 // Holder designates channel data owned for the benefit of the user client.
@@ -5477,6 +5504,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
 pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
        pub context: ChannelContext<Signer>,
+       pub unfunded_context: UnfundedChannelContext,
 }
 
 impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
@@ -5678,12 +5706,13 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                                channel_keys_id,
 
                                blocked_monitor_updates: Vec::new(),
-                       }
+                       },
+                       unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                })
        }
 
-       /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
-       fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+       /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
+       fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
                let counterparty_keys = self.context.build_remote_transaction_keys();
                let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
                Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
@@ -5697,7 +5726,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_outbound_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
+       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
        -> Result<(Channel<Signer>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
@@ -5714,7 +5743,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
                self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
 
-               let signature = match self.get_outbound_funding_created_signature(logger) {
+               let signature = match self.get_funding_created_signature(logger) {
                        Ok(res) => res,
                        Err(e) => {
                                log_error!(logger, "Got bad signatures: {:?}!", e);
@@ -5983,6 +6012,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
 pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
        pub context: ChannelContext<Signer>,
+       pub unfunded_context: UnfundedChannelContext,
 }
 
 impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
@@ -6310,7 +6340,8 @@ impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
                                channel_keys_id,
 
                                blocked_monitor_updates: Vec::new(),
-                       }
+                       },
+                       unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                };
 
                Ok(chan)
@@ -7598,7 +7629,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7725,7 +7756,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7913,7 +7944,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
index 982aed7529832f04832acca14e39211fd7f3ce36..b22c3716ccbbdb795981d94e270424e089e25392 100644 (file)
@@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
@@ -685,7 +685,7 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
                        && self.in_flight_monitor_updates.is_empty()
        }
 
-       // Returns a count of all channels we have with this peer, including pending channels.
+       // Returns a count of all channels we have with this peer, including unfunded channels.
        fn total_channel_count(&self) -> usize {
                self.channel_by_id.len() +
                        self.outbound_v1_channel_by_id.len() +
@@ -1749,12 +1749,12 @@ macro_rules! convert_chan_err {
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => {
+       ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
                match $err {
-                       // We should only ever have `ChannelError::Close` when prefunded channels error.
+                       // We should only ever have `ChannelError::Close` when unfunded channels error.
                        // In any case, just close the channel.
                        ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+                               log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
                                update_maps_on_chan_removal!($self, &$channel_context);
                                let shutdown_res = $channel_context.force_shutdown(false);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
@@ -1784,7 +1784,7 @@ macro_rules! try_v1_outbound_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED);
+                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -2246,6 +2246,7 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
+                               // Only `Channels` in the channel_by_id map can be considered funded.
                                for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
                                                peer_state.latest_features.clone(), &self.fee_estimator);
@@ -2314,11 +2315,15 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        let features = &peer_state.latest_features;
+                       let chan_context_to_details = |context| {
+                               ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
+                       };
                        return peer_state.channel_by_id
                                .iter()
-                               .map(|(_, channel)|
-                                       ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                       features.clone(), &self.fee_estimator))
+                               .map(|(_, channel)| &channel.context)
+                               .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+                               .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+                               .map(chan_context_to_details)
                                .collect();
                }
                vec![]
@@ -2375,48 +2380,58 @@ where
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
                let result: Result<(), _> = loop {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       {
+                               let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                               .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+                               let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+                                       .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let funding_txo_opt = chan_entry.get().context.get_funding_txo();
-                                       let their_features = &peer_state.latest_features;
-                                       let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
-                                               .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
-                                       failed_htlcs = htlcs;
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
 
-                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                       // here as we don't need the monitor update to complete until we send a
-                                       // `shutdown_signed`, which we'll delay if we're pending a monitor update.
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: *counterparty_node_id,
-                                               msg: shutdown_msg,
-                                       });
+                               match peer_state.channel_by_id.entry(channel_id.clone()) {
+                                       hash_map::Entry::Occupied(mut chan_entry) => {
+                                               let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+                                               let their_features = &peer_state.latest_features;
+                                               let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+                                                       .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+                                               failed_htlcs = htlcs;
 
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update_opt.take() {
-                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                                       peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
-                                       }
+                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                               // here as we don't need the monitor update to complete until we send a
+                                               // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                       node_id: *counterparty_node_id,
+                                                       msg: shutdown_msg,
+                                               });
 
-                                       if chan_entry.get().is_shutdown() {
-                                               let channel = remove_channel!(self, chan_entry);
-                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
-                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: channel_update
-                                                       });
+                                               // Update the monitor with the shutdown script if necessary.
+                                               if let Some(monitor_update) = monitor_update_opt.take() {
+                                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
                                                }
-                                               self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
-                                       }
-                                       break Ok(());
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
+
+                                               if chan_entry.get().is_shutdown() {
+                                                       let channel = remove_channel!(self, chan_entry);
+                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: channel_update
+                                                               });
+                                                       }
+                                                       self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+                                               }
+                                               break Ok(());
+                                       },
+                                       hash_map::Entry::Vacant(_) => (),
+                               }
                        }
+                       // If we reach this point, it means that the channel_id either refers to an unfunded channel or
+                       // it does not exist for this peer. Either way, we can attempt to force-close it.
+                       //
+			// If the channel does not exist, force_close_channel_with_peer will return an appropriate error.
+                       return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+                       // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
+                       // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
                };
 
                for htlc_source in failed_htlcs.drain(..) {
@@ -2535,14 +2550,14 @@ where
                                self.issue_channel_close_events(&chan.get().context, closure_reason);
                                let mut chan = remove_channel!(self, chan);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Prefunded channel has no update
+                               // Unfunded channel has no update
                                (None, chan.context.get_counterparty_node_id())
                        } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
                                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                                self.issue_channel_close_events(&chan.get().context, closure_reason);
                                let mut chan = remove_channel!(self, chan);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Prefunded channel has no update
+                               // Unfunded channel has no update
                                (None, chan.context.get_counterparty_node_id())
                        } else {
                                return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
@@ -3350,7 +3365,7 @@ where
                        Some(chan) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
-                               let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
                                                let user_id = chan.context.get_user_id();
@@ -3523,27 +3538,48 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                for channel_id in channel_ids {
-                       if !peer_state.channel_by_id.contains_key(channel_id) {
+                       if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
                                        err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
                                });
-                       }
+                       };
                }
                for channel_id in channel_ids {
-                       let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
-                       let mut config = channel.context.config();
-                       config.apply(config_update);
-                       if !channel.context.update_config(&config) {
+                       if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
+                               let mut config = channel.context.config();
+                               config.apply(config_update);
+                               if !channel.context.update_config(&config) {
+                                       continue;
+                               }
+                               if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                               } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: channel.context.get_counterparty_node_id(),
+                                               msg,
+                                       });
+                               }
                                continue;
                        }
-                       if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
-                       } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                       node_id: channel.context.get_counterparty_node_id(),
-                                       msg,
+
+                       let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
+                               &mut channel.context
+                       } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
+                               &mut channel.context
+                       } else {
+                               // This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
+                               debug_assert!(false);
+                               return Err(APIError::ChannelUnavailable {
+                                       err: format!(
+                                               "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
+                                               log_bytes!(*channel_id), counterparty_node_id),
                                });
-                       }
+                       };
+                       let mut config = context.config();
+                       config.apply(config_update);
+			// We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`,
+			// which is the state these pending inbound/outbound channels are in.
+                       context.update_config(&config);
                }
                Ok(())
        }
@@ -4290,6 +4326,7 @@ where
        ///  * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
        ///    with the current [`ChannelConfig`].
 ///  * Removing peers which have disconnected and no longer have any channels.
+       ///  * Force-closing and removing channels which have not completed establishment in a timely manner.
        ///
        /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
        /// estimate fetches.
@@ -4384,6 +4421,26 @@ where
 
                                                true
                                        });
+
+                                       let process_unfunded_channel_tick = |
+                                               chan_id: &[u8; 32],
+                                               chan_context: &mut ChannelContext<<SP::Target as SignerProvider>::Signer>,
+                                               unfunded_chan_context: &mut UnfundedChannelContext,
+                                       | {
+                                               chan_context.maybe_expire_prev_config();
+                                               if unfunded_chan_context.should_expire_unfunded_channel() {
+							log_error!(self.logger, "Force-closing pending channel {} for not establishing in a timely manner", log_bytes!(&chan_id[..]));
+                                                       update_maps_on_chan_removal!(self, &chan_context);
+                                                       self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
+                                                       self.finish_force_close_channel(chan_context.force_shutdown(false));
+                                                       false
+                                               } else {
+                                                       true
+                                               }
+                                       };
+                                       peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+                                       peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+
                                        if peer_state.ok_to_remove(true) {
                                                pending_peers_awaiting_removal.push(counterparty_node_id);
                                        }
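For reference, the tick driving this pruning is normally supplied by `lightning-background-processor`; a hedged sketch of a hand-rolled driver (assumes a tokio runtime and a `channel_manager` handle):

	// Hypothetical driver: age unfunded channels by one tick per minute.
	let mut interval = tokio::time::interval(std::time::Duration::from_secs(60));
	loop {
		interval.tick().await;
		channel_manager.timer_tick_occurred();
	}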
@@ -5494,38 +5551,50 @@ where
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-
-                                       if !chan_entry.get().received_shutdown() {
-                                               log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
-                                                       log_bytes!(msg.channel_id),
-                                                       if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
-                                       }
+                       // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
+                       // https://github.com/lightningdevkit/rust-lightning/issues/2422
+                       if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+                               self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+                               let mut chan = remove_channel!(self, chan_entry);
+                               self.finish_force_close_channel(chan.context.force_shutdown(false));
+                               return Ok(());
+                       } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+                               self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+                               let mut chan = remove_channel!(self, chan_entry);
+                               self.finish_force_close_channel(chan.context.force_shutdown(false));
+                               return Ok(());
+                       } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+                               if !chan_entry.get().received_shutdown() {
+                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                               log_bytes!(msg.channel_id),
+                                               if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+                               }
 
-                                       let funding_txo_opt = chan_entry.get().context.get_funding_txo();
-                                       let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
-                                               chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
-                                       dropped_htlcs = htlcs;
+                               let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+                               let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+                                       chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+                               dropped_htlcs = htlcs;
 
-                                       if let Some(msg) = shutdown {
-                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                               // here as we don't need the monitor update to complete until we send a
-                                               // `shutdown_signed`, which we'll delay if we're pending a monitor update.
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: *counterparty_node_id,
-                                                       msg,
-                                               });
-                                       }
+                               if let Some(msg) = shutdown {
+                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                       // here as we don't need the monitor update to complete until we send a
+                                       // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                               node_id: *counterparty_node_id,
+                                               msg,
+                                       });
+                               }
 
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update_opt {
-                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                                       peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
-                                       }
-                                       break Ok(());
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               // Update the monitor with the shutdown script if necessary.
+                               if let Some(monitor_update) = monitor_update_opt {
+                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                               peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+                               }
+                               break Ok(());
+                       } else {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
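The comment in the block above encodes an ordering rule worth spelling out: the `shutdown` reply carries nothing that must survive a crash, so it can be queued before the monitor update persists; only the later `closing_signed` depends on durability. A minimal sketch of that queue-then-persist shape, with `Peer`, `Msg`, and `apply_monitor_update` as hypothetical stand-ins rather than LDK's types:

    // Sketch only: `Peer`, `Msg`, and `apply_monitor_update` are hypothetical
    // stand-ins illustrating the ordering, not LDK's types.
    struct Peer { pending_msgs: Vec<Msg> }

    #[allow(dead_code)]
    #[derive(Debug)]
    enum Msg { Shutdown, ClosingSigned }

    struct MonitorUpdate;

    fn apply_monitor_update(_update: MonitorUpdate) {
        // Persistence would happen here; `closing_signed` waits on it, the
        // `shutdown` reply queued above does not.
    }

    fn handle_shutdown(peer: &mut Peer, monitor_update: Option<MonitorUpdate>) {
        // Queue the reply first: it does not depend on the monitor write.
        peer.pending_msgs.push(Msg::Shutdown);
        if let Some(update) = monitor_update {
            apply_monitor_update(update);
        }
    }

    fn main() {
        let mut peer = Peer { pending_msgs: Vec::new() };
        handle_shutdown(&mut peer, Some(MonitorUpdate));
        assert_eq!(peer.pending_msgs.len(), 1);
    }
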
@@ -7226,37 +7295,20 @@ where
                log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
                let per_peer_state = self.per_peer_state.read().unwrap();
-               for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        let pending_msg_events = &mut peer_state.pending_msg_events;
-                       peer_state.channel_by_id.retain(|_, chan| {
-                               let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
-                                       if !chan.context.have_received_message() {
-                                               // If we created this (outbound) channel while we were disconnected from the
-                                               // peer we probably failed to send the open_channel message, which is now
-                                               // lost. We can't have had anything pending related to this channel, so we just
-                                               // drop it.
-                                               false
-                                       } else {
-                                               pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                                       node_id: chan.context.get_counterparty_node_id(),
-                                                       msg: chan.get_channel_reestablish(&self.logger),
-                                               });
-                                               true
-                                       }
-                               } else { true };
-                               if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
-                                       if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
-                                               if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
-                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
-                                                               node_id: *counterparty_node_id,
-                                                               msg, update_msg,
-                                                       });
-                                               }
-                                       }
-                               }
-                               retain
+
+                       // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+                       // (so won't be recovered after a crash), we don't need to bother closing unfunded channels and
+                       // clearing their maps here. Instead we can just queue channel_reestablish messages for
+                       // channels in the channel_by_id map.
+                       peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+                               pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                       node_id: chan.context.get_counterparty_node_id(),
+                                       msg: chan.get_channel_reestablish(&self.logger),
+                               });
                        });
                }
                //TODO: Also re-broadcast announcement_signatures
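The rewritten loop leans on the invariant stated in the comment: unfunded channel maps are emptied on disconnect, so after a reconnect every entry left in `channel_by_id` is funded and unconditionally gets a reestablish. A reduced sketch of that invariant, using simplified stand-in types rather than the real `PeerState`:

    use std::collections::HashMap;

    struct Channel;
    #[allow(dead_code)]
    struct Reestablish { channel_id: [u8; 32] }

    struct PeerState {
        channel_by_id: HashMap<[u8; 32], Channel>,              // funded, persisted
        outbound_v1_channel_by_id: HashMap<[u8; 32], Channel>,  // unfunded, memory-only
    }

    fn on_disconnect(peer: &mut PeerState) {
        // Unfunded channels are never persisted, so they are simply dropped.
        peer.outbound_v1_channel_by_id.clear();
    }

    fn on_reconnect(peer: &PeerState, out: &mut Vec<Reestablish>) {
        // Everything that survived the disconnect is funded: no filtering needed.
        for id in peer.channel_by_id.keys() {
            out.push(Reestablish { channel_id: *id });
        }
    }

    fn main() {
        let mut peer = PeerState {
            channel_by_id: HashMap::new(),
            outbound_v1_channel_by_id: HashMap::new(),
        };
        peer.outbound_v1_channel_by_id.insert([0; 32], Channel);
        on_disconnect(&mut peer);
        let mut msgs = Vec::new();
        on_reconnect(&peer, &mut msgs);
        assert!(msgs.is_empty()); // the unfunded channel yields no reestablish
    }
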
@@ -10186,6 +10238,25 @@ mod tests {
                        MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                        _ => panic!("expected BroadcastChannelUpdate event"),
                }
+
+               // If we provide a channel_id not associated with the peer, we should get an error and no updates
+               // should be applied, preserving the update atomicity specified in the API docs.
+               let bad_channel_id = [10; 32];
+               let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
+               let new_fee = current_fee + 100;
+               assert!(
+                       matches!(
+                               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
+                                       forwarding_fee_proportional_millionths: Some(new_fee),
+                                       ..Default::default()
+                               }),
+                               Err(APIError::ChannelUnavailable { err: _ }),
+                       )
+               );
+               // Check that the fee hasn't changed for the channel that exists.
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 0);
        }
 }
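The new test pins down the atomicity contract of `update_partial_channel_config`: one unknown channel_id fails the whole call and leaves every config untouched. The usual way to obtain that guarantee is a validate-then-apply split, sketched here with a plain map standing in for the manager's channel state (not the actual implementation):

    use std::collections::HashMap;

    struct Config { fee_ppm: u32 }

    // Validate-then-apply: check every id before touching anything, so the
    // apply loop can no longer fail part-way through the batch.
    fn update_configs(
        channels: &mut HashMap<[u8; 32], Config>,
        ids: &[[u8; 32]],
        new_fee: u32,
    ) -> Result<(), ()> {
        if !ids.iter().all(|id| channels.contains_key(id)) {
            return Err(()); // reject the whole batch; nothing was modified
        }
        for id in ids {
            channels.get_mut(id).expect("validated above").fee_ppm = new_fee;
        }
        Ok(())
    }

    fn main() {
        let mut channels = HashMap::new();
        channels.insert([1; 32], Config { fee_ppm: 0 });
        // One good id plus one bad id: the call errors and the good channel is untouched.
        assert!(update_configs(&mut channels, &[[1; 32], [10; 32]], 100).is_err());
        assert_eq!(channels[&[1; 32]].fee_ppm, 0);
    }
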
 
index 220557e4ca3ecfbde2ab1c7dd9a443a6787b8925..84bc1a1b3f0656476aa1d00771f0368d97003cd0 100644 (file)
@@ -2861,13 +2861,6 @@ macro_rules! get_chan_reestablish_msgs {
                                        panic!("Unexpected event")
                                }
                        }
-                       for chan in $src_node.node.list_channels() {
-                               if chan.is_public && chan.counterparty.node_id != $dst_node.node.get_our_node_id() {
-                                       if let Some(scid) = chan.short_channel_id {
-                                               assert!(announcements.remove(&scid));
-                                       }
-                               }
-                       }
                        assert!(announcements.is_empty());
                        res
                }
index 271ff541a84981733090b30600956886c2845d40..8de2e9f631a53325ba5b6ba6b384392762017967 100644 (file)
@@ -61,6 +61,8 @@ use crate::sync::{Arc, Mutex};
 use crate::ln::functional_test_utils::*;
 use crate::ln::chan_utils::CommitmentTransaction;
 
+use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
+
 #[test]
 fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
@@ -8881,13 +8883,13 @@ fn test_duplicate_chan_id() {
        let (_, funding_created) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
+               // Once we call `get_funding_created` the channel has a duplicate channel_id as
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
                let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
-               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+               as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
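The comment explains why the test drops the channel rather than leaving it in place: funded channel ids are assumed unique, and a second insert under the same id violates internal state. A standalone sketch of that insert guard, with simplified types rather than the ChannelManager internals:

    use std::collections::hash_map::{Entry, HashMap};

    struct Channel;

    // Funded channel ids must be unique; an insert under an occupied id means
    // the manager's state is already invalid, hence the panic alluded to above.
    fn insert_channel(map: &mut HashMap<[u8; 32], Channel>, id: [u8; 32], chan: Channel) {
        match map.entry(id) {
            Entry::Occupied(_) => panic!("duplicate channel_id: invalid internal state"),
            Entry::Vacant(v) => { v.insert(chan); },
        }
    }

    fn main() {
        let mut map = HashMap::new();
        insert_channel(&mut map, [42; 32], Channel); // first insert succeeds
    }
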
@@ -10017,3 +10019,89 @@ fn test_disconnects_peer_awaiting_response_ticks() {
                }
        }
 }
+
+#[test]
+fn test_remove_expired_outbound_unfunded_channels() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingGenerationReady { .. } => (),
+               _ => panic!("Unexpected event"),
+       };
+
+       // Asserts that the outbound channel's presence in nodes[0]'s peer state map matches `should_exist`.
+       let check_outbound_channel_existence = |should_exist: bool| {
+               let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+               let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+               assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+       };
+
+       // Channel should exist without any timer ticks.
+       check_outbound_channel_existence(true);
+
+       // Channel should exist with 1 timer tick less than required.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+               nodes[0].node.timer_tick_occurred();
+               check_outbound_channel_existence(true)
+       }
+
+       // Remove channel after reaching the required ticks.
+       nodes[0].node.timer_tick_occurred();
+       check_outbound_channel_existence(false);
+
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+}
+
+#[test]
+fn test_remove_expired_inbound_unfunded_channels() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingGenerationReady { .. } => (),
+               _ => panic!("Unexpected event"),
+       };
+
+       // Asserts that the inbound channel's presence in nodes[1]'s peer state map matches `should_exist`.
+       let check_inbound_channel_existence = |should_exist: bool| {
+               let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+               let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
+               assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+       };
+
+       // Channel should exist without any timer ticks.
+       check_inbound_channel_existence(true);
+
+       // Channel should exist with 1 timer tick less than required.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+               nodes[1].node.timer_tick_occurred();
+               check_inbound_channel_existence(true)
+       }
+
+       // Remove channel after reaching the required ticks.
+       nodes[1].node.timer_tick_occurred();
+       check_inbound_channel_existence(false);
+
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+}
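Both tests count the same clock: each `timer_tick_occurred` ages an unfunded channel by one tick, and the channel is force-closed once it reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`. A reduced model of that expiry loop follows; the constant's value and the field names here are illustrative only, not the real `channel.rs` definitions:

    use std::collections::HashMap;

    // Illustrative value only; the real constant is defined in channel.rs.
    const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

    struct UnfundedChannel { unfunded_ticks: usize }

    fn timer_tick(unfunded: &mut HashMap<[u8; 32], UnfundedChannel>) {
        unfunded.retain(|_id, chan| {
            chan.unfunded_ticks += 1;
            // Keep strictly below the limit; the real code also emits a
            // ClosureReason::HolderForceClosed event when a channel is dropped.
            chan.unfunded_ticks < UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
        });
    }

    fn main() {
        let mut unfunded = HashMap::new();
        unfunded.insert([0; 32], UnfundedChannel { unfunded_ticks: 0 });
        for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
            timer_tick(&mut unfunded);
            assert_eq!(unfunded.len(), 1); // still present one tick short of the limit
        }
        timer_tick(&mut unfunded);
        assert!(unfunded.is_empty()); // removed on the final tick
    }
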
index 722a81f7ab665b15961363a1f064377718e36dd3..dbca9b785e85dfbaf3c68253e7e47b91225c6bdc 100644 (file)
@@ -173,18 +173,15 @@ impl<'a> core::fmt::Display for DebugBytes<'a> {
 ///
 /// This is not exported to bindings users as fmt can't be used in C
 #[doc(hidden)]
-pub struct DebugIter<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone>(pub core::cell::RefCell<I>);
+pub struct DebugIter<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone>(pub I);
 impl<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone> fmt::Display for DebugIter<T, I> {
        fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-               use core::ops::DerefMut;
                write!(f, "[")?;
-               let iter_ref = self.0.clone();
-               let mut iter = iter_ref.borrow_mut();
-               for item in iter.deref_mut() {
+               let mut iter = self.0.clone();
+               if let Some(item) = iter.next() {
                        write!(f, "{}", item)?;
-                       break;
                }
-               for item in iter.deref_mut() {
+               while let Some(item) = iter.next() {
                        write!(f, ", {}", item)?;
                }
                write!(f, "]")?;
index 4703bcdb32a2a00719f4d7a5eb1edd37aa2d75ec..e79980370342ff01920fe5a37b4423719a228ac7 100644 (file)
@@ -19,7 +19,7 @@ use crate::util::logger::DebugBytes;
 
 macro_rules! log_iter {
        ($obj: expr) => {
-               $crate::util::logger::DebugIter(core::cell::RefCell::new($obj))
+               $crate::util::logger::DebugIter($obj)
        }
 }
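Dropping the `RefCell` works because the iterator bound already demands `Clone`: `fmt` can clone the wrapped iterator and consume the copy, so no interior mutability is needed to satisfy `Display::fmt`'s `&self` receiver. The same pattern in self-contained form, with `JoinedIter` as a hypothetical stand-in for `DebugIter`:

    use core::fmt;

    // Hypothetical stand-in for DebugIter: clone the iterator inside `fmt`
    // instead of hiding it behind a RefCell, since the bound requires Clone.
    struct JoinedIter<T: fmt::Display, I: Iterator<Item = T> + Clone>(I);

    impl<T: fmt::Display, I: Iterator<Item = T> + Clone> fmt::Display for JoinedIter<T, I> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "[")?;
            let mut iter = self.0.clone();
            if let Some(first) = iter.next() {
                write!(f, "{}", first)?; // first item without a separator
            }
            for item in iter {
                write!(f, ", {}", item)?; // remaining items, comma-separated
            }
            write!(f, "]")
        }
    }

    fn main() {
        println!("{}", JoinedIter([1, 2, 3].iter())); // prints `[1, 2, 3]`
    }

Cloning is typically cheap for the slice and map iterators being logged, which is what makes this simpler form viable.
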
 
diff --git a/msrv-no-dev-deps-check/Cargo.toml b/msrv-no-dev-deps-check/Cargo.toml
new file mode 100644 (file)
index 0000000..d733607
--- /dev/null
@@ -0,0 +1,13 @@
+[package]
+name = "msrv-check"
+version = "0.1.0"
+edition = "2018"
+
+[dependencies]
+lightning = { path = "../lightning" }
+lightning-block-sync = { path = "../lightning-block-sync", features = [ "rest-client", "rpc-client" ] }
+lightning-invoice = { path = "../lightning-invoice" }
+lightning-net-tokio = { path = "../lightning-net-tokio" }
+lightning-persister = { path = "../lightning-persister" }
+lightning-background-processor = { path = "../lightning-background-processor", features = ["futures"] }
+lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
diff --git a/msrv-no-dev-deps-check/src/lib.rs b/msrv-no-dev-deps-check/src/lib.rs
new file mode 100644 (file)
index 0000000..e69de29