"lightning-custom-message",
"lightning-transaction-sync",
"no-std-check",
+ "msrv-no-dev-deps-check",
"bench",
]
RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }')
HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')"
-# Tokio MSRV on versions 1.17 through 1.26 is rustc 1.49. Above 1.26 MSRV is 1.56.
-[ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.1" --verbose
-[[ "$RUSTC_MINOR_VERSION" -gt 48 && "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.25.1" --verbose
+# Some crates require pinning to meet our MSRV even for our downstream users,
+# which we do here.
+# Further crates which appear only as dev-dependencies are pinned further down.
+function PIN_RELEASE_DEPS {
+ # Tokio MSRV on versions 1.17 through 1.26 is rustc 1.49. Above 1.26 MSRV is 1.56.
+ [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.1" --verbose
+ [[ "$RUSTC_MINOR_VERSION" -gt 48 && "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.25.1" --verbose
-# Sadly the log crate is always a dependency of tokio until 1.20, and has no reasonable MSRV guarantees
-[ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p log --precise "0.4.18" --verbose
+ # Sadly the log crate is always a dependency of tokio until 1.20, and has no reasonable MSRV guarantees
+ [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p log --precise "0.4.18" --verbose
+
+ # The serde_json crate switched to Rust edition 2021 starting with v1.0.101, i.e., has MSRV of 1.56
+ [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p serde_json --precise "1.0.100" --verbose
+
+ return 0 # Don't fail the script if our rustc is higher than the last check
+}
+
+PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
# The addr2line v0.20 crate (a dependency of `backtrace` starting with 0.3.68) relies on 1.55+
[ "$RUSTC_MINOR_VERSION" -lt 55 ] && cargo update -p backtrace --precise "0.3.67" --verbose
-# The serde_json crate switched to Rust edition 2021 starting with v1.0.101, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p serde_json --precise "1.0.100" --verbose
+# The quote crate switched to Rust edition 2021 starting with v1.0.31, i.e., has MSRV of 1.56
+[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p quote --precise "1.0.30" --verbose
+
+# The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
+[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
[ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
echo -e "\n\nTesting no-std build on a downstream no-std crate"
# check no-std compatibility across dependencies
pushd no-std-check
-cargo check --verbose --color always --features lightning-transaction-sync
+if [[ $RUSTC_MINOR_VERSION -gt 67 ]]; then
+ # lightning-transaction-sync's MSRV is 1.67
+ cargo check --verbose --color always --features lightning-transaction-sync
+else
+ cargo check --verbose --color always
+fi
+popd
+
+# Test that we can build downstream code with only the "release pins".
+pushd msrv-no-dev-deps-check
+PIN_RELEASE_DEPS
+cargo check
popd
if [ -f "$(which arm-none-eabi-gcc)" ]; then
[dependencies]
bitcoin = "0.29.0"
lightning = { version = "0.0.116-rc1", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
+tokio = { version = "1.0", features = [ "io-util", "rt", "sync", "net", "time" ] }
[dev-dependencies]
tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
use lightning::ln::msgs::NetAddress;
use std::ops::Deref;
-use std::task;
+use std::task::{self, Poll};
+use std::future::Future;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
+use std::pin::Pin;
use std::hash::Hash;
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
+// We only need to select over multiple futures in one place, and taking on the full `tokio/macros`
+// dependency tree in order to do so (which has broken our MSRV before) is excessive. Instead, we
+// define trivial two- and three-way selector futures with the specific types we need and just use those.
+
+pub(crate) enum SelectorOutput {
+ A(Option<()>), B(Option<()>), C(tokio::io::Result<usize>),
+}
+
+pub(crate) struct TwoSelector<
+ A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> {
+ pub a: A,
+ pub b: B,
+}
+
+impl<
+ A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin
+> Future for TwoSelector<A, B> {
+ type Output = SelectorOutput;
+ fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+ match Pin::new(&mut self.a).poll(ctx) {
+ Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+ Poll::Pending => {},
+ }
+ match Pin::new(&mut self.b).poll(ctx) {
+ Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+ Poll::Pending => {},
+ }
+ Poll::Pending
+ }
+}
+
+pub(crate) struct ThreeSelector<
+ A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+> {
+ pub a: A,
+ pub b: B,
+ pub c: C,
+}
+
+impl<
+ A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+> Future for ThreeSelector<A, B, C> {
+ type Output = SelectorOutput;
+ fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
+ match Pin::new(&mut self.a).poll(ctx) {
+ Poll::Ready(res) => { return Poll::Ready(SelectorOutput::A(res)); },
+ Poll::Pending => {},
+ }
+ match Pin::new(&mut self.b).poll(ctx) {
+ Poll::Ready(res) => { return Poll::Ready(SelectorOutput::B(res)); },
+ Poll::Pending => {},
+ }
+ match Pin::new(&mut self.c).poll(ctx) {
+ Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
+ Poll::Pending => {},
+ }
+ Poll::Pending
+ }
+}
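
// Illustrative sketch (not part of this change): awaiting one of these selectors over two
// hypothetical `tokio::sync::mpsc::Receiver<()>` wakers. `Box::pin` is used because the `recv`
// futures are not guaranteed to be `Unpin`, matching the trait bounds above.
async fn wait_for_wake(
	write_avail: &mut tokio::sync::mpsc::Receiver<()>,
	read_wake: &mut tokio::sync::mpsc::Receiver<()>,
) -> SelectorOutput {
	// Whichever future becomes ready first determines which variant is returned.
	TwoSelector {
		a: Box::pin(write_avail.recv()),
		b: Box::pin(read_wake.recv()),
	}.await
}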
+
/// Connection contains all our internal state for a connection - we hold a reference to the
/// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
/// read future (which is returned by schedule_read).
}
us_lock.read_paused
};
- tokio::select! {
- v = write_avail_receiver.recv() => {
+ // TODO: Drop the Box'ing of the futures once our MSRV includes `std::pin::pin!` (Rust 1.68), letting us pin them on the stack.
+ let select_result = if read_paused {
+ TwoSelector {
+ a: Box::pin(write_avail_receiver.recv()),
+ b: Box::pin(read_wake_receiver.recv()),
+ }.await
+ } else {
+ ThreeSelector {
+ a: Box::pin(write_avail_receiver.recv()),
+ b: Box::pin(read_wake_receiver.recv()),
+ c: Box::pin(reader.read(&mut buf)),
+ }.await
+ };
+ match select_result {
+ SelectorOutput::A(v) => {
assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
if peer_manager.as_ref().write_buffer_space_avail(&mut our_descriptor).is_err() {
break Disconnect::CloseConnection;
}
},
- _ = read_wake_receiver.recv() => {},
- read = reader.read(&mut buf), if !read_paused => match read {
- Ok(0) => break Disconnect::PeerDisconnected,
- Ok(len) => {
- let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
- let mut us_lock = us.lock().unwrap();
- match read_res {
- Ok(pause_read) => {
- if pause_read {
- us_lock.read_paused = true;
- }
- },
- Err(_) => break Disconnect::CloseConnection,
- }
- },
- Err(_) => break Disconnect::PeerDisconnected,
+ SelectorOutput::B(_) => {},
+ SelectorOutput::C(read) => {
+ match read {
+ Ok(0) => break Disconnect::PeerDisconnected,
+ Ok(len) => {
+ let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
+ let mut us_lock = us.lock().unwrap();
+ match read_res {
+ Ok(pause_read) => {
+ if pause_read {
+ us_lock.read_paused = true;
+ }
+ },
+ Err(_) => break Disconnect::CloseConnection,
+ }
+ },
+ Err(_) => break Disconnect::PeerDisconnected,
+ }
},
}
let _ = event_waker.try_send(());
/// A trait which should be implemented to provide feerate information on a number of time
/// horizons.
///
+/// If access to a local mempool is not feasible, feerate estimates should be fetched from a set of
+/// third parties hosting them. Note that this enables them to affect the propagation of your
+/// pre-signed transactions at any time and therefore endangers the safety of channel funds. Such
+/// a deployment should be considered carefully.
+///
/// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
/// called from inside the library in response to chain events, P2P events, or timer events).
pub trait FeeEstimator {
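
// Illustrative sketch (not part of this change): an implementation backed by a cache which some
// other task refreshes out-of-band, e.g. from a local bitcoind or, with the caveats above, a
// carefully chosen third party. The `CachedFeerate` type is hypothetical and would live alongside
// the trait in user code.
struct CachedFeerate {
	// Estimate in sat per 1000 weight units, refreshed out-of-band.
	sat_per_1000_weight: core::sync::atomic::AtomicU32,
}

impl FeeEstimator for CachedFeerate {
	fn get_est_sat_per_1000_weight(&self, _confirmation_target: ConfirmationTarget) -> u32 {
		// LDK never uses a value below 253 sat/kW; applying the same floor here keeps the
		// estimate sane even if the cache was never populated.
		self.sat_per_1000_weight.load(core::sync::atomic::Ordering::Acquire).max(253)
	}
}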
.compute_package_feerate(fee_estimator, conf_target, force_feerate_bump);
if let Some(input_amount_sat) = output.funding_amount {
let fee_sat = input_amount_sat - tx.output.iter().map(|output| output.value).sum::<u64>();
- if compute_feerate_sat_per_1000_weight(fee_sat, tx.weight() as u64) >=
- package_target_feerate_sat_per_1000_weight
- {
- log_debug!(logger, "Commitment transaction {} already meets required feerate {} sat/kW",
- tx.txid(), package_target_feerate_sat_per_1000_weight);
+ let commitment_tx_feerate_sat_per_1000_weight =
+ compute_feerate_sat_per_1000_weight(fee_sat, tx.weight() as u64);
+ if commitment_tx_feerate_sat_per_1000_weight >= package_target_feerate_sat_per_1000_weight {
+ log_debug!(logger, "Pre-signed {} already has feerate {} sat/kW above required {} sat/kW",
+ log_tx!(tx), commitment_tx_feerate_sat_per_1000_weight,
+ package_target_feerate_sat_per_1000_weight);
return Some((new_timer, 0, OnchainClaim::Tx(tx.clone())));
}
}
}
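
	// Worked example with illustrative numbers: a pre-signed commitment transaction spending a
	// 100_000 sat funding output with outputs totalling 99_500 sat pays 500 sat in fees. At a
	// weight of 1_000 WU that is 500 * 1_000 / 1_000 = 500 sat/kW, so a package target of
	// 400 sat/kW is already satisfied and the transaction can be returned without a fee bump.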
//TODO: getting latest holder transactions should be infallible and result in us "force-closing the channel", but we may
- // have empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
+ // have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after OutboundV1Channel::get_funding_created,
// before providing an initial commitment transaction. For outbound channel, init ChannelMonitor at Channel::funding_signed, there is nothing
// to monitor before.
pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
);
#[derive(Clone, Debug, PartialEq, Eq)]
-/// The reason the channel was closed. See individual variants more details.
+/// The reason the channel was closed. See individual variants for more details.
pub enum ClosureReason {
/// Closure generated from receiving a peer error message.
///
///
/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- OutdatedChannelManager
+ OutdatedChannelManager,
+ /// The counterparty requested a cooperative close of a channel that had not been funded yet.
+ /// The channel has been immediately closed.
+ CounterpartyCoopClosedUnfundedChannel,
}
impl core::fmt::Display for ClosureReason {
},
ClosureReason::DisconnectedPeer => f.write_str("the peer disconnected prior to the channel being funded"),
ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
+ ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
}
}
}
(8, ProcessingError) => { (1, err, required) },
(10, DisconnectedPeer) => {},
(12, OutdatedChannelManager) => {},
+ (13, CounterpartyCoopClosedUnfundedChannel) => {},
);
/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
+/// The number of ticks that may elapse since the creation of an unfunded outbound/inbound channel
+/// while we wait for it to be promoted to a funded [`Channel`]. An unfunded channel
+/// exceeding this age limit will be force-closed and purged from memory.
+pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
+
struct PendingChannelMonitorUpdate {
update: ChannelMonitorUpdate,
}
(0, update, required),
});
+/// Contains all state common to unfunded inbound/outbound channels.
+pub(super) struct UnfundedChannelContext {
+ /// A counter tracking how many ticks have elapsed since this unfunded channel was
+ /// created. If this counter reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` before the channel
+ /// progresses to a funded state, the channel will be force-closed and purged from memory.
+ ///
+ /// This is so that we don't keep channels around that haven't progressed to a funded state
+ /// in a timely manner.
+ unfunded_channel_age_ticks: usize,
+}
+
+impl UnfundedChannelContext {
+ /// Determines whether we should force-close and purge this unfunded channel from memory due to it
+ /// having reached the unfunded channel age limit.
+ ///
+ /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+ pub fn should_expire_unfunded_channel(&mut self) -> bool {
+ self.unfunded_channel_age_ticks += 1;
+ self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
+ }
+}
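
// Illustrative sketch (not part of this change; the test name is hypothetical): with the common
// setup where `timer_tick_occurred` is driven roughly once per minute (e.g. by the background
// processor), reaching `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` means an unfunded channel that makes
// no progress for about an hour gets force-closed and purged.
#[cfg(test)]
#[test]
fn unfunded_channel_expiry_counter_sketch() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	// The final tick trips the limit.
	assert!(ctx.should_expire_unfunded_channel());
}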
+
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<Signer: ChannelSigner> {
config: LegacyChannelConfig,
}
/// Returns the funding_txo we either got from our peer, or were given by
- /// get_outbound_funding_created.
+ /// get_funding_created.
pub fn get_funding_txo(&self) -> Option<OutPoint> {
self.channel_transaction_parameters.funding_outpoint
}
#[inline]
/// Creates a set of keys for build_commitment_transaction to generate a transaction which we
/// will sign and send to our counterparty.
- /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+ /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
fn build_remote_transaction_keys(&self) -> TxCreationKeys {
//TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
//may see payments to it!
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
+// calling channel_id() before we're set up or things like get_funding_signed on an
// inbound channel.
//
// Holder designates channel data owned for the benefit of the user client.
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
pub context: ChannelContext<Signer>,
+ pub unfunded_context: UnfundedChannelContext,
}
impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
channel_keys_id,
blocked_monitor_updates: Vec::new(),
- }
+ },
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
})
}
- /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
- fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+ /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
+ fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
let counterparty_keys = self.context.build_remote_transaction_keys();
let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
/// Note that channel_id changes during this call!
/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
/// If an Err is returned, it is a ChannelError::Close.
- pub fn get_outbound_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
+ pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
-> Result<(Channel<Signer>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
if !self.context.is_outbound() {
panic!("Tried to create outbound funding_created message on an inbound channel!");
self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let signature = match self.get_outbound_funding_created_signature(logger) {
+ let signature = match self.get_funding_created_signature(logger) {
Ok(res) => res,
Err(e) => {
log_error!(logger, "Got bad signatures: {:?}!", e);
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
pub context: ChannelContext<Signer>,
+ pub unfunded_context: UnfundedChannelContext,
}
impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
channel_keys_id,
blocked_monitor_updates: Vec::new(),
- }
+ },
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
};
Ok(chan)
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+ let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+ let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
- let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+ let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
&& self.in_flight_monitor_updates.is_empty()
}
- // Returns a count of all channels we have with this peer, including pending channels.
+ // Returns a count of all channels we have with this peer, including unfunded channels.
fn total_channel_count(&self) -> usize {
self.channel_by_id.len() +
self.outbound_v1_channel_by_id.len() +
},
}
};
- ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => {
+ ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
match $err {
- // We should only ever have `ChannelError::Close` when prefunded channels error.
+ // We should only ever have `ChannelError::Close` when unfunded channels error.
// In any case, just close the channel.
ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
- log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+ log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
update_maps_on_chan_removal!($self, &$channel_context);
let shutdown_res = $channel_context.force_shutdown(false);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED);
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
if drop {
$entry.remove_entry();
}
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ // Only `Channels` in the channel_by_id map can be considered funded.
for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
peer_state.latest_features.clone(), &self.fee_estimator);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let features = &peer_state.latest_features;
+ let chan_context_to_details = |context| {
+ ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
+ };
return peer_state.channel_by_id
.iter()
- .map(|(_, channel)|
- ChannelDetails::from_channel_context(&channel.context, best_block_height,
- features.clone(), &self.fee_estimator))
+ .map(|(_, channel)| &channel.context)
+ .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+ .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+ .map(chan_context_to_details)
.collect();
}
vec![]
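
	// Illustrative consequence (hypothetical variables): with the unfunded channel maps chained in
	// above, a channel which has only exchanged open_channel/accept_channel is now included in the
	// returned `ChannelDetails`, with `is_channel_ready` still false:
	//
	//     let details = /* per-counterparty channel list from above */;
	//     assert!(details.iter().any(|c| c.channel_id == temporary_channel_id && !c.is_channel_ready));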
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
- let per_peer_state = self.per_peer_state.read().unwrap();
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex = per_peer_state.get(counterparty_node_id)
- .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let their_features = &peer_state.latest_features;
- let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
- .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
- failed_htlcs = htlcs;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg: shutdown_msg,
- });
+ match peer_state.channel_by_id.entry(channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+ let their_features = &peer_state.latest_features;
+ let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+ failed_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt.take() {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
- }
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg: shutdown_msg,
+ });
- if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, chan_entry);
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt.take() {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
}
- self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
- }
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
+
+ if chan_entry.get().is_shutdown() {
+ let channel = remove_channel!(self, chan_entry);
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
+ }
+ self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+ }
+ break Ok(());
+ },
+ hash_map::Entry::Vacant(_) => (),
+ }
}
+ // If we reach this point, it means that the channel_id either refers to an unfunded channel or
+ // it does not exist for this peer. Either way, we can attempt to force-close it.
+ //
+ // If the channel does not exist at all, force_close_channel_with_peer will return the appropriate error.
+ return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+ // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
+ // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
};
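	// Illustrative consequence (hypothetical variables): a cooperative close requested for a
	// channel that never reached the funded state no longer fails with `ChannelUnavailable`; it
	// falls through to the force-close path above, e.g.:
	//
	//     channel_manager.close_channel(&temporary_channel_id, &counterparty_node_id)?;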
for htlc_source in failed_htlcs.drain(..) {
self.issue_channel_close_events(&chan.get().context, closure_reason);
let mut chan = remove_channel!(self, chan);
self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Prefunded channel has no update
+ // Unfunded channel has no update
(None, chan.context.get_counterparty_node_id())
} else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
self.issue_channel_close_events(&chan.get().context, closure_reason);
let mut chan = remove_channel!(self, chan);
self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Prefunded channel has no update
+ // Unfunded channel has no update
(None, chan.context.get_counterparty_node_id())
} else {
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
Some(chan) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
- let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for channel_id in channel_ids {
- if !peer_state.channel_by_id.contains_key(channel_id) {
+ if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
});
- }
+ };
}
for channel_id in channel_ids {
- let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
- let mut config = channel.context.config();
- config.apply(config_update);
- if !channel.context.update_config(&config) {
+ if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
+ let mut config = channel.context.config();
+ config.apply(config_update);
+ if !channel.context.update_config(&config) {
+ continue;
+ }
+ if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg,
+ });
+ }
continue;
}
- if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
- } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.context.get_counterparty_node_id(),
- msg,
+
+ let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
+ &mut channel.context
+ } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
+ &mut channel.context
+ } else {
+ // This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
+ debug_assert!(false);
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
+ log_bytes!(*channel_id), counterparty_node_id),
});
- }
+ };
+ let mut config = context.config();
+ config.apply(config_update);
+ // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`
+ // which would be the case for pending inbound/outbound channels.
+ context.update_config(&config);
}
Ok(())
}
/// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
/// with the current [`ChannelConfig`].
/// * Removing peers which have disconnected but no longer have any channels.
+ /// * Force-closing and removing channels which have not completed establishment in a timely manner.
///
/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
/// estimate fetches.
true
});
+
+ let process_unfunded_channel_tick = |
+ chan_id: &[u8; 32],
+ chan_context: &mut ChannelContext<<SP::Target as SignerProvider>::Signer>,
+ unfunded_chan_context: &mut UnfundedChannelContext,
+ | {
+ chan_context.maybe_expire_prev_config();
+ if unfunded_chan_context.should_expire_unfunded_channel() {
+ log_error!(self.logger, "Force-closing pending channel {} for not establishing in a timely manner", log_bytes!(&chan_id[..]));
+ update_maps_on_chan_removal!(self, &chan_context);
+ self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
+ self.finish_force_close_channel(chan_context.force_shutdown(false));
+ false
+ } else {
+ true
+ }
+ };
+ peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+ peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+
if peer_state.ok_to_remove(true) {
pending_peers_awaiting_removal.push(counterparty_node_id);
}
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
-
- if !chan_entry.get().received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
- log_bytes!(msg.channel_id),
- if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
- }
+ // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
+ // https://github.com/lightningdevkit/rust-lightning/issues/2422
+ if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+ self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel!(self, chan_entry);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ return Ok(());
+ } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+ self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel!(self, chan_entry);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ return Ok(());
+ } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+ if !chan_entry.get().received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
- chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
- dropped_htlcs = htlcs;
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+ chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+ dropped_htlcs = htlcs;
- if let Some(msg) = shutdown {
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
+ if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
- }
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+ }
+ break Ok(());
+ } else {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
for htlc_source in dropped_htlcs.drain(..) {
log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
let per_peer_state = self.per_peer_state.read().unwrap();
- for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|_, chan| {
- let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
- if !chan.context.have_received_message() {
- // If we created this (outbound) channel while we were disconnected from the
- // peer we probably failed to send the open_channel message, which is now
- // lost. We can't have had anything pending related to this channel, so we just
- // drop it.
- false
- } else {
- pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&self.logger),
- });
- true
- }
- } else { true };
- if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
- if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
- if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
- pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
- node_id: *counterparty_node_id,
- msg, update_msg,
- });
- }
- }
- }
- retain
+
+ // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+ // (so won't be recovered after a crash), we don't need to bother closing unfunded channels and
+ // clearing their maps here. Instead we can just queue channel_reestablish messages for
+ // channels in the channel_by_id map.
+ peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_channel_reestablish(&self.logger),
+ });
});
}
//TODO: Also re-broadcast announcement_signatures
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
+
+ // If we provide a channel_id not associated with the peer, we should get an error and no updates
+ // should be applied to ensure update atomicity as specified in the API docs.
+ let bad_channel_id = [10; 32];
+ let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
+ let new_fee = current_fee + 100;
+ assert!(
+ matches!(
+ nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
+ forwarding_fee_proportional_millionths: Some(new_fee),
+ ..Default::default()
+ }),
+ Err(APIError::ChannelUnavailable { err: _ }),
+ )
+ );
+ // Check that the fee hasn't changed for the channel that exists.
+ assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 0);
}
}
panic!("Unexpected event")
}
}
- for chan in $src_node.node.list_channels() {
- if chan.is_public && chan.counterparty.node_id != $dst_node.node.get_our_node_id() {
- if let Some(scid) = chan.short_channel_id {
- assert!(announcements.remove(&scid));
- }
- }
- }
assert!(announcements.is_empty());
res
}
use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;
+use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
+
#[test]
fn test_insane_channel_opens() {
// Stand up a network of 2 nodes
let (_, funding_created) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
+ // Once we call `get_funding_created` the channel has a duplicate channel_id as
// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
// try to create another channel. Instead, we drop the channel entirely here (leaving the
// channelmanager in a possibly nonsense state instead).
let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
let logger = test_utils::TestLogger::new();
- as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+ as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
};
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
}
}
}
+
+#[test]
+fn test_remove_expired_outbound_unfunded_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+ let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::FundingGenerationReady { .. } => (),
+ _ => panic!("Unexpected event"),
+ };
+
+ // Asserts whether the outbound channel is present in nodes[0]'s peer state map.
+ let check_outbound_channel_existence = |should_exist: bool| {
+ let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+ assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ };
+
+ // Channel should exist without any timer ticks.
+ check_outbound_channel_existence(true);
+
+ // Channel should exist with 1 timer tick less than required.
+ for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+ nodes[0].node.timer_tick_occurred();
+ check_outbound_channel_existence(true)
+ }
+
+ // Remove channel after reaching the required ticks.
+ nodes[0].node.timer_tick_occurred();
+ check_outbound_channel_existence(false);
+
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+}
+
+#[test]
+fn test_remove_expired_inbound_unfunded_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+ let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::FundingGenerationReady { .. } => (),
+ _ => panic!("Unexpected event"),
+ };
+
+ // Asserts whether the inbound channel is present in nodes[1]'s peer state map.
+ let check_inbound_channel_existence = |should_exist: bool| {
+ let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+ let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
+ assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ };
+
+ // Channel should exist without any timer ticks.
+ check_inbound_channel_existence(true);
+
+ // Channel should exist with 1 timer tick less than required.
+ for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+ nodes[1].node.timer_tick_occurred();
+ check_inbound_channel_existence(true)
+ }
+
+ // Remove channel after reaching the required ticks.
+ nodes[1].node.timer_tick_occurred();
+ check_inbound_channel_existence(false);
+
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+}
///
/// This is not exported to bindings users as fmt can't be used in C
#[doc(hidden)]
-pub struct DebugIter<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone>(pub core::cell::RefCell<I>);
+pub struct DebugIter<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone>(pub I);
impl<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone> fmt::Display for DebugIter<T, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
- use core::ops::DerefMut;
write!(f, "[")?;
- let iter_ref = self.0.clone();
- let mut iter = iter_ref.borrow_mut();
- for item in iter.deref_mut() {
+ let mut iter = self.0.clone();
+ if let Some(item) = iter.next() {
write!(f, "{}", item)?;
- break;
}
- for item in iter.deref_mut() {
+ while let Some(item) = iter.next() {
write!(f, ", {}", item)?;
}
write!(f, "]")?;
macro_rules! log_iter {
($obj: expr) => {
- $crate::util::logger::DebugIter(core::cell::RefCell::new($obj))
+ $crate::util::logger::DebugIter($obj)
}
}
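
// Illustrative sketch (not part of this change; `scids` is hypothetical): because `DebugIter` now
// clones the iterator inside `fmt` rather than draining a `RefCell`, the same wrapper can be
// formatted more than once and still print the full list.
//
//     let scids = vec![42u64, 43, 44];
//     let wrapped = log_iter!(scids.iter());
//     assert_eq!(format!("{}", wrapped), "[42, 43, 44]");
//     assert_eq!(format!("{}", wrapped), "[42, 43, 44]"); // a second format still sees every item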
--- /dev/null
+[package]
+name = "msrv-check"
+version = "0.1.0"
+edition = "2018"
+
+[dependencies]
+lightning = { path = "../lightning" }
+lightning-block-sync = { path = "../lightning-block-sync", features = [ "rest-client", "rpc-client" ] }
+lightning-invoice = { path = "../lightning-invoice" }
+lightning-net-tokio = { path = "../lightning-net-tokio" }
+lightning-persister = { path = "../lightning-persister" }
+lightning-background-processor = { path = "../lightning-background-processor", features = ["futures"] }
+lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }