From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Date: Wed, 18 Aug 2021 00:31:28 +0000 (+0000)
Subject: Merge pull request #1048 from TheBlueMatt/2021-08-no-net-print
X-Git-Tag: v0.0.101~34
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=803d8041f143469993620ed75e84cba226b860fa;hp=35d0b7aae73760df48f814db6ce4b72e8ee5523a;p=rust-lightning

Merge pull request #1048 from TheBlueMatt/2021-08-no-net-print

Remove explicit print in lightning-net-tokio, reduce redundant block connection logging
---

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dc2ea744..a23f9d76 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,15 +1,94 @@
-# 0.0.100 - WIP
+# 0.0.100 - 2021-08-17
+
+## API Updates
+ * The `lightning` crate can now be built in no_std mode, making it easy to
+   target embedded hardware for Rust users. Note that mutexes are replaced with
+   no-ops for such builds (#1008, #1028).
+ * LDK now supports sending and receiving "keysend" payments. This includes
+   modifications to `lightning::util::events::Event::PaymentReceived` to
+   indicate the type of payment (#967).
+ * A new variant, `lightning::util::events::Event::PaymentForwarded`, has been
+   added which indicates a forwarded payment has been successfully claimed and
+   we've received a forwarding fee (#1004).
+ * `lightning::chain::keysinterface::KeysInterface::get_shutdown_pubkey` has
+   been renamed to `get_shutdown_scriptpubkey`, returns a script, and is now
+   called on channel open only if
+   `lightning::util::config::ChannelConfig::commit_upfront_shutdown_pubkey` is
+   set (#1019).
+ * Closing-signed negotiation is now more configurable, with an explicit
+   `lightning::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis`
+   field allowing you to select the maximum amount you are willing to pay to
+   avoid a force-closure. Further, we are now less restrictive on the fee
+   placed on the closing transaction when we are not the party paying it. To
+   control the feerate paid on a channel at close-time, use
+   `ChannelManager::close_channel_with_target_feerate` instead of
+   `close_channel` (#1011).
+ * `lightning_background_processor::BackgroundProcessor` now stops the
+   background thread when dropped (#1007). It is marked `#[must_use]` so that
+   Rust users will receive a compile-time warning when it is immediately
+   dropped after construction (#1029).
+ * Total potential funds burn on force-close due to dust outputs is now limited
+   to `lightning::util::config::ChannelConfig::max_dust_htlc_exposure_msat` per
+   channel (#1009).
+ * The interval on which
+   `lightning::ln::peer_handler::PeerManager::timer_tick_occurred` should be
+   called has been reduced to once every five seconds (#1035) and
+   `lightning::ln::channelmanager::ChannelManager::timer_tick_occurred` should
+   now be called on startup in addition to once per minute (#985).
+ * The rust-bitcoin and bech32 dependencies have been updated to their
+   respective latest versions (0.27 and 0.8, #1012).
+
+## Bug Fixes
+ * Fix panic when reading invoices generated by some versions of c-lightning
+   (#1002 and #1003).
+ * Fix panic when attempting to validate a signed message of incorrect length
+   (#1010).
+ * Do not ignore the route hints in invoices when the invoice is over 250k
+   sats (#986).
+ * Fees are automatically updated on outbound channels to ensure commitment
+   transactions are always broadcastable (#985).
+ * Fixes a rare case where a `lightning::util::events::Event::SpendableOutputs`
+   event is not generated after a counterparty commitment transaction is
+   confirmed in a reorg when a conflicting local commitment transaction is
+   removed in the same reorg (#1022).
+ * Fixes a remotely-triggerable force-closure of an origin channel after an
+   HTLC was forwarded over a next-hop channel and the next-hop channel was
+   force-closed by our counterparty (#1025).
+ * Fixes a rare force-closure case when sending a payment as a channel fundee
+   when overdrawing our remaining balance. Instead the send will fail (#998).
+ * Fixes a rare force-closure case when a payment was claimed prior to a
+   peer disconnection or restart, and later failed (#977).

 ## Serialization Compatibility
+ * Pending inbound keysend payments which have neither been failed nor claimed
+   when serialized will result in a `ChannelManager` which is not readable on
+   pre-0.0.100 clients (#967).
+ * Because
+   `lightning::chain::keysinterface::KeysInterface::get_shutdown_scriptpubkey`
+   has been updated to return a script instead of only a `PublicKey`,
+   `ChannelManager`s constructed with custom `KeysInterface` implementations on
+   0.0.100 and later versions will not be readable on previous versions.
+   `ChannelManager`s created with 0.0.99 and prior versions will remain readable
+   even after a serialization roundtrip on 0.0.100, as long as no new
+   channels are opened. Further, users who use a
+   `lightning::chain::keysinterface::KeysManager` as their `KeysInterface` will
+   have `ChannelManager`s which are readable on prior versions as well (#1019).
+ * `ChannelMonitorUpdate`s created by 0.0.100 and later for channels when
+   `lightning::util::config::ChannelConfig::commit_upfront_shutdown_pubkey` is
+   not set may not be readable by versions prior to 0.0.100 (#1019).
 * HTLCs which were in the process of being claimed on-chain when a pre-0.0.100
   `ChannelMonitor` was serialized may generate `PaymentForwarded` events with
   spurious `fee_earned_msat` values. This only applies to payments which were
-   unresolved at the time of the upgrade.
- * 0.0.100 clients with pending PaymentForwarded events at serialization-time
-   will generate serialized `ChannelManager` objects which 0.0.99 and earlier
-   clients cannot read. The likelihood of this can be reduced by ensuring you
-   process all pending events immediately before serialization (as is done by
-   the `lightning-background-processor` crate).
+   unresolved at the time of the upgrade (#1004).
+ * 0.0.100 clients with pending `Event::PaymentForwarded` events at
+   serialization-time will generate serialized `ChannelManager` objects which
+   0.0.99 and earlier clients cannot read. The likelihood of this can be reduced
+   by ensuring you process all pending events immediately before serialization
+   (as is done by the `lightning-background-processor` crate, #1004).
+
+
+In total, this release features 59 files changed, 5861 insertions, and 2082
+deletions in 95 commits from 6 authors.
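The closing-fee change above (#1011) replaces the old single-feerate
closing_signed negotiation with a fee-range negotiation, implemented in
`Channel::calculate_closing_fee_limits` and `Channel::closing_signed` in the
diff below. The following is a minimal, self-contained Rust sketch of that
idea; the names, numbers, and the single-step `clamp` resolution are
illustrative simplifications, not LDK's actual API (the real code derives
feerates from a `FeeEstimator` and iterates the negotiation across several
closing_signed messages):

// Hypothetical sketch only: a real node computes the weight from the actual
// closing transaction and the feerates from its FeeEstimator.

/// Our acceptable closing-transaction fee range, in whole satoshis.
struct ClosingFeeLimits {
    min_fee_sat: u64,
    max_fee_sat: u64,
}

/// Mirrors the shape of `calculate_closing_fee_limits`: the funder proposes
/// from the Background feerate up to the Normal feerate plus its configured
/// force-close-avoidance allowance; the fundee sets no upper bound, since the
/// funder pays the fee.
fn closing_fee_limits(
    is_funder: bool,
    background_feerate_per_kw: u64,
    normal_feerate_per_kw: u64,
    closing_tx_weight: u64,
    force_close_avoidance_max_fee_sat: u64,
) -> ClosingFeeLimits {
    let min_fee_sat = background_feerate_per_kw * closing_tx_weight / 1000;
    let max_fee_sat = if is_funder {
        normal_feerate_per_kw * closing_tx_weight / 1000 + force_close_avoidance_max_fee_sat
    } else {
        u64::MAX
    };
    ClosingFeeLimits { min_fee_sat, max_fee_sat }
}

/// Resolve the peer's closing_signed proposal against our range in one step.
/// Non-overlapping ranges are a soft failure: the diff returns
/// `ChannelError::Warn` here rather than immediately force-closing.
fn pick_closing_fee(
    ours: &ClosingFeeLimits,
    their_fee_sat: u64,
    their_range: Option<(u64, u64)>,
) -> Result<u64, String> {
    if let Some((their_min, their_max)) = their_range {
        if their_max < ours.min_fee_sat || their_min > ours.max_fee_sat {
            return Err("closing fee ranges do not overlap".to_owned());
        }
    }
    Ok(their_fee_sat.clamp(ours.min_fee_sat, ours.max_fee_sat))
}

fn main() {
    // Funder: 1000-weight closing tx, 253 sat/kW Background and 2000 sat/kW
    // Normal feerates, 1000 sat force-close-avoidance allowance.
    let ours = closing_fee_limits(true, 253, 2000, 1000, 1000);
    assert_eq!((ours.min_fee_sat, ours.max_fee_sat), (253, 3000));
    // Peer proposes 2500 sat within an advertised 2000..4000 sat range: accept.
    assert_eq!(pick_closing_fee(&ours, 2500, Some((2000, 4000))), Ok(2500));
    // Peer's whole range is below our minimum: warn, don't force-close.
    assert!(pick_closing_fee(&ours, 100, Some((50, 200))).is_err());
}

As in the diff, the asymmetry is deliberate: only the funder caps the fee,
because only the funder pays it, and a failed negotiation is merely warned
about until the one-minute `timer_check_closing_negotiation_progress` check
gives up and force-closes.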
# 0.0.99 - 2021-07-09 diff --git a/fuzz/src/msg_targets/gen_target.sh b/fuzz/src/msg_targets/gen_target.sh index 044f1a1e..0c1d061a 100755 --- a/fuzz/src/msg_targets/gen_target.sh +++ b/fuzz/src/msg_targets/gen_target.sh @@ -14,7 +14,6 @@ echo "mod utils;" > mod.rs GEN_TEST AcceptChannel test_msg "" GEN_TEST AnnouncementSignatures test_msg "" GEN_TEST ChannelReestablish test_msg "" -GEN_TEST ClosingSigned test_msg "" GEN_TEST CommitmentSigned test_msg "" GEN_TEST DecodedOnionErrorPacket test_msg "" GEN_TEST FundingCreated test_msg "" @@ -40,6 +39,7 @@ GEN_TEST UpdateAddHTLC test_msg_hole ", 85, 33" GEN_TEST ErrorMessage test_msg_hole ", 32, 2" GEN_TEST ChannelUpdate test_msg_hole ", 108, 1" +GEN_TEST ClosingSigned test_msg_simple "" GEN_TEST Init test_msg_simple "" GEN_TEST OnionHopData test_msg_simple "" GEN_TEST Ping test_msg_simple "" diff --git a/fuzz/src/msg_targets/mod.rs b/fuzz/src/msg_targets/mod.rs index e11e3eb2..0f273cb7 100644 --- a/fuzz/src/msg_targets/mod.rs +++ b/fuzz/src/msg_targets/mod.rs @@ -2,7 +2,6 @@ mod utils; pub mod msg_accept_channel; pub mod msg_announcement_signatures; pub mod msg_channel_reestablish; -pub mod msg_closing_signed; pub mod msg_commitment_signed; pub mod msg_decoded_onion_error_packet; pub mod msg_funding_created; @@ -25,6 +24,7 @@ pub mod msg_gossip_timestamp_filter; pub mod msg_update_add_htlc; pub mod msg_error_message; pub mod msg_channel_update; +pub mod msg_closing_signed; pub mod msg_init; pub mod msg_onion_hop_data; pub mod msg_ping; diff --git a/fuzz/src/msg_targets/msg_closing_signed.rs b/fuzz/src/msg_targets/msg_closing_signed.rs index 47881d32..52f39af2 100644 --- a/fuzz/src/msg_targets/msg_closing_signed.rs +++ b/fuzz/src/msg_targets/msg_closing_signed.rs @@ -17,11 +17,11 @@ use utils::test_logger; #[inline] pub fn msg_closing_signed_test(data: &[u8], _out: Out) { - test_msg!(msgs::ClosingSigned, data); + test_msg_simple!(msgs::ClosingSigned, data); } #[no_mangle] pub extern "C" fn msg_closing_signed_run(data: *const u8, datalen: usize) { let data = unsafe { std::slice::from_raw_parts(data, datalen) }; - test_msg!(msgs::ClosingSigned, data); + test_msg_simple!(msgs::ClosingSigned, data); } diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index 1659ffd3..98d9ade9 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-background-processor" -version = "0.0.99" +version = "0.0.100" authors = ["Valentine Wallace "] license = "MIT OR Apache-2.0" repository = "http://github.com/rust-bitcoin/rust-lightning" @@ -11,9 +11,9 @@ edition = "2018" [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.99", path = "../lightning", features = ["allow_wallclock_use"] } -lightning-persister = { version = "0.0.99", path = "../lightning-persister" } +lightning = { version = "0.0.100", path = "../lightning", features = ["allow_wallclock_use"] } +lightning-persister = { version = "0.0.100", path = "../lightning-persister" } [dev-dependencies] -lightning = { version = "0.0.99", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] } diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml index 36bb5af9..2a8ff0c6 100644 --- a/lightning-block-sync/Cargo.toml +++ b/lightning-block-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-block-sync" -version = "0.0.99" +version = "0.0.100" authors = ["Jeffrey 
Czyz", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "http://github.com/rust-bitcoin/rust-lightning" @@ -15,7 +15,7 @@ rpc-client = [ "serde", "serde_json", "chunked_transfer" ] [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.99", path = "../lightning" } +lightning = { version = "0.0.100", path = "../lightning" } tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true } serde = { version = "1.0", features = ["derive"], optional = true } serde_json = { version = "1.0", optional = true } diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index 404f12d7..8c6623b8 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.7.0" +version = "0.8.0" authors = ["Sebastian Geisler "] documentation = "https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" @@ -10,10 +10,10 @@ readme = "README.md" [dependencies] bech32 = "0.8" -lightning = { version = "0.0.99", path = "../lightning" } +lightning = { version = "0.0.100", path = "../lightning" } secp256k1 = { version = "0.20", features = ["recovery"] } num-traits = "0.2.8" bitcoin_hashes = "0.10" [dev-dependencies] -lightning = { version = "0.0.99", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] } diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index 928eab7a..053766e1 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-net-tokio" -version = "0.0.99" +version = "0.0.100" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/rust-bitcoin/rust-lightning/" @@ -12,7 +12,7 @@ edition = "2018" [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.99", path = "../lightning" } +lightning = { version = "0.0.100", path = "../lightning" } tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] } [dev-dependencies] diff --git a/lightning-persister/Cargo.toml b/lightning-persister/Cargo.toml index 81c69d0f..cc585b9c 100644 --- a/lightning-persister/Cargo.toml +++ b/lightning-persister/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-persister" -version = "0.0.99" +version = "0.0.100" authors = ["Valentine Wallace", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/rust-bitcoin/rust-lightning/" @@ -13,11 +13,11 @@ unstable = ["lightning/unstable"] [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.99", path = "../lightning" } +lightning = { version = "0.0.100", path = "../lightning" } libc = "0.2" [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.0.99", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] } diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 8580a53a..1ef4c74f 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.0.99" +version = "0.0.100" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/rust-bitcoin/rust-lightning/" diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs 
b/lightning/src/ln/chanmon_update_fail_tests.rs index 37bcabfc..70bffeeb 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -142,7 +142,7 @@ fn test_monitor_and_persister_update_fail() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) { - if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) { + if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that even though the persister is returning a TemporaryFailure, // because the update is bogus, ultimately the error that's returned // should be a PermanentFailure. @@ -2561,8 +2561,8 @@ fn test_reconnect_dup_htlc_claims() { #[test] fn test_temporary_error_during_shutdown() { - // Test that temporary failures when updating the monitor's shutdown script do not prevent - // cooperative close. + // Test that temporary failures when updating the monitor's shutdown script delay cooperative + // close. let mut config = test_default_channel_config(); config.channel_options.commit_upfront_shutdown_pubkey = false; @@ -2575,9 +2575,41 @@ fn test_temporary_error_during_shutdown() { *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, false); - check_added_monitors!(nodes[0], 1); + + nodes[0].node.close_channel(&channel_id).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id())); + check_added_monitors!(nodes[0], 1); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + *nodes[0].chain_monitor.update_ret.lock().unwrap() = None; + *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; + + let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); + let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + + 
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap()); + let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(none_b.is_none()); + let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + + assert_eq!(txn_a, txn_b); + assert_eq!(txn_a.len(), 1); + check_spends!(txn_a[0], funding_tx); } #[test] diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a0071d54..03968118 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -255,8 +255,6 @@ enum ChannelState { RemoteShutdownSent = 1 << 10, /// Flag which is set on ChannelFunded or FundingSent after sending a shutdown message. At this /// point, we may not add any new HTLCs to the channel. - /// TODO: Investigate some kind of timeout mechanism by which point the remote end must provide - /// us their shutdown. LocalShutdownSent = 1 << 11, /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about /// to drop us, but we store this anyway. @@ -453,7 +451,20 @@ pub(super) struct Channel { /// Max to_local and to_remote outputs in a remote-generated commitment transaction counterparty_max_commitment_tx_output: Mutex<(u64, u64)>, - last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig) + last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig) + target_closing_feerate_sats_per_kw: Option, + + /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor` + /// update, we need to delay processing it until later. We do that here by simply storing the + /// closing_signed message and handling it in `maybe_propose_closing_signed`. + pending_counterparty_closing_signed: Option, + + /// The minimum and maximum absolute fee we are willing to place on the closing transaction. + /// These are set once we reach `closing_negotiation_ready`. + #[cfg(test)] + pub(crate) closing_fee_limits: Option<(u64, u64)>, + #[cfg(not(test))] + closing_fee_limits: Option<(u64, u64)>, /// The hash of the block in which the funding transaction was included. funding_tx_confirmed_in: Option, @@ -496,6 +507,13 @@ pub(super) struct Channel { commitment_secrets: CounterpartyCommitmentSecrets, channel_update_status: ChannelUpdateStatus, + /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does + /// not complete within a single timer tick (one minute), we should force-close the channel. + /// This prevents us from keeping unusable channels around forever if our counterparty wishes + /// to DoS us. + /// Note that this field is reset to false on deserialization to give us a chance to connect to + /// our peer and start the closing_signed negotiation fresh. + closing_signed_in_flight: bool, /// Our counterparty's channel_announcement signatures provided in announcement_signatures. /// This can be used to rebroadcast the channel_announcement message later. @@ -574,6 +592,7 @@ pub const MIN_DUST_LIMIT_SATOSHIS: u64 = 330; /// channel_id in ChannelManager. 
pub(super) enum ChannelError { Ignore(String), + Warn(String), Close(String), CloseDelayBroadcast(String), } @@ -582,6 +601,7 @@ impl fmt::Debug for ChannelError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e), + &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e), &ChannelError::Close(ref e) => write!(f, "Close : {}", e), &ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e) } @@ -693,6 +713,9 @@ impl Channel { counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, @@ -728,6 +751,7 @@ impl Channel { commitment_secrets: CounterpartyCommitmentSecrets::new(), channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, announcement_sigs: None, @@ -952,6 +976,9 @@ impl Channel { counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, @@ -991,6 +1018,7 @@ impl Channel { commitment_secrets: CounterpartyCommitmentSecrets::new(), channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, announcement_sigs: None, @@ -2371,9 +2399,8 @@ impl Channel { Ok(()) } - pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &F, logger: &L) -> Result<(msgs::RevokeAndACK, Option, Option, ChannelMonitorUpdate), (Option, ChannelError)> - where F::Target: FeeEstimator, - L::Target: Logger + pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option, ChannelMonitorUpdate), (Option, ChannelError)> + where L::Target: Logger { if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()))); @@ -2539,12 +2566,10 @@ impl Channel { } log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id)); - // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and - // re-send the message on restoration) return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned()))); } - let (commitment_signed, closing_signed) = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - // we'll send one right away when we get the revoke_and_ack when we // free_holding_cell_htlcs(). @@ -2553,10 +2578,8 @@ impl Channel { // strictly increasing by one, so decrement it here. 
self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - (Some(msg), None) - } else if !need_commitment { - (None, self.maybe_propose_first_closing_signed(fee_estimator)) - } else { (None, None) }; + Some(msg) + } else { None }; log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" }); @@ -2565,7 +2588,7 @@ impl Channel { channel_id: self.channel_id, per_commitment_secret, next_per_commitment_point, - }, commitment_signed, closing_signed, monitor_update)) + }, commitment_signed, monitor_update)) } /// Public version of the below, checking relevant preconditions first. @@ -2702,9 +2725,8 @@ impl Channel { /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, /// generating an appropriate error *after* the channel state has been updated based on the /// revoke_and_ack message. - pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &F, logger: &L) -> Result<(Option, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError> - where F::Target: FeeEstimator, - L::Target: Logger, + pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Option, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError> + where L::Target: Logger, { if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned())); @@ -2885,7 +2907,7 @@ impl Channel { self.monitor_pending_forwards.append(&mut to_forward_infos); self.monitor_pending_failures.append(&mut revoked_htlcs); log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); - return Ok((None, Vec::new(), Vec::new(), None, monitor_update, Vec::new())) + return Ok((None, Vec::new(), Vec::new(), monitor_update, Vec::new())) } match self.free_holding_cell_htlcs(logger)? { @@ -2904,7 +2926,7 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail)) + Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) }, (None, htlcs_to_fail) => { if require_commitment { @@ -2924,14 +2946,13 @@ impl Channel { update_fail_malformed_htlcs, update_fee: None, commitment_signed - }), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail)) + }), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) } else { log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); - Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, htlcs_to_fail)) + Ok((None, to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) } } } - } /// Adds a pending update to this channel. 
See the doc for send_htlc for @@ -2986,6 +3007,8 @@ impl Channel { // Upon reconnect we have to start the closing_signed dance over, but shutdown messages // will be retransmitted. self.last_sent_closing_fee = None; + self.pending_counterparty_closing_signed = None; + self.closing_fee_limits = None; let mut inbound_drop_count = 0; self.pending_inbound_htlcs.retain(|htlc| { @@ -3323,7 +3346,8 @@ impl Channel { // now! match self.free_holding_cell_htlcs(logger) { Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)), - Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"), + Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => + panic!("Got non-channel-failing result from free_holding_cell_htlcs"), Ok((Some((commitment_update, monitor_update)), htlcs_to_fail)) => { return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), htlcs_to_fail, shutdown_msg)); }, @@ -3352,44 +3376,125 @@ impl Channel { } } - fn maybe_propose_first_closing_signed(&mut self, fee_estimator: &F) -> Option + /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole + /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart, + /// at which point they will be recalculated. + fn calculate_closing_fee_limits(&mut self, fee_estimator: &F) -> (u64, u64) where F::Target: FeeEstimator { - if !self.is_outbound() || !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() || - self.channel_state & (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32) != BOTH_SIDES_SHUTDOWN_MASK || - self.last_sent_closing_fee.is_some() || self.pending_update_fee.is_some() { - return None; - } + if let Some((min, max)) = self.closing_fee_limits { return (min, max); } + // Propose a range from our current Background feerate to our Normal feerate plus our + // force_close_avoidance_max_fee_satoshis. + // If we fail to come to consensus, we'll have to force-close. let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); - if self.feerate_per_kw > proposed_feerate { - proposed_feerate = self.feerate_per_kw; - } - assert!(self.shutdown_scriptpubkey.is_some()); + let normal_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() }; + + // The spec requires that (when the channel does not have anchors) we only send absolute + // channel fees no greater than the absolute channel fee on the current commitment + // transaction. It's unclear *which* commitment transaction this refers to, and there isn't + // very good reason to apply such a limit in any case. We don't bother doing so, risking + // some force-closure by old nodes, but we wanted to close the channel anyway. + + if let Some(target_feerate) = self.target_closing_feerate_sats_per_kw { + let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.feerate_per_kw, target_feerate) }; + proposed_feerate = cmp::max(proposed_feerate, min_feerate); + proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate); + } + + // Note that technically we could end up with a lower minimum fee if one sides' balance is + // below our dust limit, causing the output to disappear. 
We don't bother handling this + // case, however, as this should only happen if a channel is closed before any (material) + // payments have been made on it. This may cause slight fee overpayment and/or failure to + // come to consensus with our counterparty on appropriate fees, however it should be a + // relatively rare case. We can revisit this later, though note that in order to determine + // if the funders' output is dust we have to know the absolute fee we're going to use. let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; + let proposed_max_total_fee_satoshis = if self.is_outbound() { + // We always add force_close_avoidance_max_fee_satoshis to our normal + // feerate-calculated fee, but allow the max to be overridden if we're using a + // target feerate-calculated fee. + cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.force_close_avoidance_max_fee_satoshis, + proposed_max_feerate as u64 * tx_weight / 1000) + } else { + u64::max_value() + }; + + self.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis)); + self.closing_fee_limits.clone().unwrap() + } + + /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true + /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At + /// this point if we're the funder we should send the initial closing_signed, and in any case + /// shutdown should complete within a reasonable timeframe. + fn closing_negotiation_ready(&self) -> bool { + self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() && + self.channel_state & + (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 | + ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) + == BOTH_SIDES_SHUTDOWN_MASK && + self.pending_update_fee.is_none() + } + + /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning + /// an Err if no progress is being made and the channel should be force-closed instead. + /// Should be called on a one-minute timer. 
+ pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> { + if self.closing_negotiation_ready() { + if self.closing_signed_in_flight { + return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned())); + } else { + self.closing_signed_in_flight = true; + } + } + Ok(()) + } + + pub fn maybe_propose_closing_signed(&mut self, fee_estimator: &F, logger: &L) + -> Result<(Option, Option), ChannelError> + where F::Target: FeeEstimator, L::Target: Logger + { + if self.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() { + return Ok((None, None)); + } + + if !self.is_outbound() { + if let Some(msg) = &self.pending_counterparty_closing_signed.take() { + return self.closing_signed(fee_estimator, &msg); + } + return Ok((None, None)); + } + + let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); + + assert!(self.shutdown_scriptpubkey.is_some()); + let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false); + log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", + our_min_fee, our_max_fee, total_fee_satoshis); - let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(proposed_total_fee_satoshis, false); let sig = self.holder_signer .sign_closing_transaction(&closing_tx, &self.secp_ctx) - .ok(); - assert!(closing_tx.get_weight() as u64 <= tx_weight); - if sig.is_none() { return None; } + .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; - self.last_sent_closing_fee = Some((proposed_feerate, total_fee_satoshis, sig.clone().unwrap())); - Some(msgs::ClosingSigned { + self.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); + Ok((Some(msgs::ClosingSigned { channel_id: self.channel_id, fee_satoshis: total_fee_satoshis, - signature: sig.unwrap(), - }) + signature: sig, + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: our_min_fee, + max_fee_satoshis: our_max_fee, + }), + }), None)) } - pub fn shutdown( - &mut self, fee_estimator: &F, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown - ) -> Result<(Option, Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError> - where - F::Target: FeeEstimator, - K::Target: KeysInterface + pub fn shutdown( + &mut self, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown + ) -> Result<(Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError> + where K::Target: KeysInterface { if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); @@ -3477,7 +3582,7 @@ impl Channel { self.channel_state |= ChannelState::LocalShutdownSent as u32; self.update_time_counter += 1; - Ok((shutdown, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, dropped_outbound_htlcs)) + Ok((shutdown, monitor_update, dropped_outbound_htlcs)) } fn build_signed_closing_transaction(&self, tx: &mut Transaction, counterparty_sig: &Signature, sig: &Signature) { @@ -3518,10 +3623,19 @@ impl Channel { return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } + if self.is_outbound() && self.last_sent_closing_fee.is_none() { + return Err(ChannelError::Close("Remote tried to send a closing_signed when we were 
supposed to propose the first one".to_owned())); + } + + if self.channel_state & ChannelState::MonitorUpdateFailed as u32 != 0 { + self.pending_counterparty_closing_signed = Some(msg.clone()); + return Ok((None, None)); + } + let funding_redeemscript = self.get_funding_redeemscript(); let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false); if used_total_fee != msg.fee_satoshis { - return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee greater than the value they can claim. Fee in message: {}", msg.fee_satoshis))); + return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee))); } let mut sighash = hash_to_message!(&bip143::SigHashCache::new(&closing_tx).signature_hash(0, &funding_redeemscript, self.channel_value_satoshis, SigHashType::All)[..]); @@ -3536,76 +3650,104 @@ impl Channel { }, }; - let closing_tx_max_weight = self.get_closing_transaction_weight( - if let Some(oup) = closing_tx.output.get(0) { Some(&oup.script_pubkey) } else { None }, - if let Some(oup) = closing_tx.output.get(1) { Some(&oup.script_pubkey) } else { None }); - if let Some((_, last_fee, sig)) = self.last_sent_closing_fee { + assert!(self.shutdown_scriptpubkey.is_some()); + if let Some((last_fee, sig)) = self.last_sent_closing_fee { if last_fee == msg.fee_satoshis { self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); - assert!(closing_tx.get_weight() as u64 <= closing_tx_max_weight); - debug_assert!(closing_tx.get_weight() as u64 >= closing_tx_max_weight - 2); self.channel_state = ChannelState::ShutdownComplete as u32; self.update_time_counter += 1; return Ok((None, Some(closing_tx))); } } - macro_rules! propose_new_feerate { - ($new_feerate: expr) => { - assert!(self.shutdown_scriptpubkey.is_some()); - let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); - let (closing_tx, used_total_fee) = self.build_closing_transaction($new_feerate as u64 * tx_weight / 1000, false); + let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); + + macro_rules! 
propose_fee { + ($new_fee: expr) => { + let (mut tx, used_fee) = if $new_fee == msg.fee_satoshis { + (closing_tx, $new_fee) + } else { + self.build_closing_transaction($new_fee, false) + }; + let sig = self.holder_signer - .sign_closing_transaction(&closing_tx, &self.secp_ctx) + .sign_closing_transaction(&tx, &self.secp_ctx) .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; - assert!(closing_tx.get_weight() as u64 <= tx_weight); - self.last_sent_closing_fee = Some(($new_feerate, used_total_fee, sig.clone())); + + let signed_tx = if $new_fee == msg.fee_satoshis { + self.channel_state = ChannelState::ShutdownComplete as u32; + self.update_time_counter += 1; + self.build_signed_closing_transaction(&mut tx, &msg.signature, &sig); + Some(tx) + } else { None }; + + self.last_sent_closing_fee = Some((used_fee, sig.clone())); return Ok((Some(msgs::ClosingSigned { channel_id: self.channel_id, - fee_satoshis: used_total_fee, + fee_satoshis: used_fee, signature: sig, - }), None)) + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: our_min_fee, + max_fee_satoshis: our_max_fee, + }), + }), signed_tx)) } } - let mut min_feerate = 253; - if self.is_outbound() { - let max_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); - if (msg.fee_satoshis as u64) > max_feerate as u64 * closing_tx_max_weight / 1000 { - if let Some((last_feerate, _, _)) = self.last_sent_closing_fee { - if max_feerate <= last_feerate { - return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something higher ({}) than our Normal feerate ({})", last_feerate, max_feerate))); - } + if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range { + if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis { + return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis))); + } + if max_fee_satoshis < our_min_fee { + return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee))); + } + if min_fee_satoshis > our_max_fee { + return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee))); + } + + if !self.is_outbound() { + // They have to pay, so pick the highest fee in the overlapping range. + debug_assert_eq!(our_max_fee, u64::max_value()); // We should never set an upper bound + propose_fee!(cmp::min(max_fee_satoshis, our_max_fee)); + } else { + if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee { + return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.", + msg.fee_satoshis, our_min_fee, our_max_fee))); } - propose_new_feerate!(max_feerate); + // The proposed fee is in our acceptable range, accept it and broadcast! 
+ propose_fee!(msg.fee_satoshis); } } else { - min_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); - } - if (msg.fee_satoshis as u64) < min_feerate as u64 * closing_tx_max_weight / 1000 { - if let Some((last_feerate, _, _)) = self.last_sent_closing_fee { - if min_feerate >= last_feerate { - return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something lower ({}) than our Background feerate ({}).", last_feerate, min_feerate))); + // Old fee style negotiation. We don't bother to enforce whether they are complying + // with the "making progress" requirements, we just comply and hope for the best. + if let Some((last_fee, _)) = self.last_sent_closing_fee { + if msg.fee_satoshis > last_fee { + if msg.fee_satoshis < our_max_fee { + propose_fee!(msg.fee_satoshis); + } else if last_fee < our_max_fee { + propose_fee!(our_max_fee); + } else { + return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee))); + } + } else { + if msg.fee_satoshis > our_min_fee { + propose_fee!(msg.fee_satoshis); + } else if last_fee > our_min_fee { + propose_fee!(our_min_fee); + } else { + return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee))); + } + } + } else { + if msg.fee_satoshis < our_min_fee { + propose_fee!(our_min_fee); + } else if msg.fee_satoshis > our_max_fee { + propose_fee!(our_max_fee); + } else { + propose_fee!(msg.fee_satoshis); } } - propose_new_feerate!(min_feerate); } - - let sig = self.holder_signer - .sign_closing_transaction(&closing_tx, &self.secp_ctx) - .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; - self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); - assert!(closing_tx.get_weight() as u64 <= closing_tx_max_weight); - debug_assert!(closing_tx.get_weight() as u64 >= closing_tx_max_weight - 2); - - self.channel_state = ChannelState::ShutdownComplete as u32; - self.update_time_counter += 1; - - Ok((Some(msgs::ClosingSigned { - channel_id: self.channel_id, - fee_satoshis: msg.fee_satoshis, - signature: sig, - }), Some(closing_tx))) } // Public utilities: @@ -3810,6 +3952,16 @@ impl Channel { self.channel_state >= ChannelState::FundingSent as u32 } + /// Returns true if our peer has either initiated or agreed to shut down the channel. + pub fn received_shutdown(&self) -> bool { + (self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0 + } + + /// Returns true if we either initiated or agreed to shut down the channel. + pub fn sent_shutdown(&self) -> bool { + (self.channel_state & ChannelState::LocalShutdownSent as u32) != 0 + } + /// Returns true if this channel is fully shut down. True here implies that no further actions /// may/will be taken on this channel, and thus this object should be freed. Any future changes /// will be handled appropriately by the chain monitor. @@ -4638,7 +4790,8 @@ impl Channel { /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. 
- pub fn get_shutdown(&mut self, keys_provider: &K, their_features: &InitFeatures) -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError> + pub fn get_shutdown(&mut self, keys_provider: &K, their_features: &InitFeatures, target_feerate_sats_per_kw: Option) + -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError> where K::Target: KeysInterface { for htlc in self.pending_outbound_htlcs.iter() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { @@ -4671,6 +4824,7 @@ impl Channel { }; // From here on out, we may not fail! + self.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; if self.channel_state < ChannelState::FundingSent as u32 { self.channel_state = ChannelState::ShutdownComplete as u32; } else { @@ -4957,15 +5111,11 @@ impl Writeable for Channel { self.update_time_counter.write(writer)?; self.feerate_per_kw.write(writer)?; - match self.last_sent_closing_fee { - Some((feerate, fee, sig)) => { - 1u8.write(writer)?; - feerate.write(writer)?; - fee.write(writer)?; - sig.write(writer)?; - }, - None => 0u8.write(writer)?, - } + // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here, + // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe + // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never + // consider the stale state on reload. + 0u8.write(writer)?; self.funding_tx_confirmed_in.write(writer)?; self.funding_tx_confirmation_height.write(writer)?; @@ -5027,6 +5177,7 @@ impl Writeable for Channel { (3, self.counterparty_selected_channel_reserve_satoshis, option), (5, self.config, required), (7, self.shutdown_scriptpubkey, option), + (9, self.target_closing_feerate_sats_per_kw, option), }); Ok(()) @@ -5173,11 +5324,19 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel let update_time_counter = Readable::read(reader)?; let feerate_per_kw = Readable::read(reader)?; - let last_sent_closing_fee = match ::read(reader)? { - 0 => None, - 1 => Some((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)), + // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here, + // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe + // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never + // consider the stale state on reload. + match ::read(reader)? { + 0 => {}, + 1 => { + let _: u32 = Readable::read(reader)?; + let _: u64 = Readable::read(reader)?; + let _: Signature = Readable::read(reader)?; + }, _ => return Err(DecodeError::InvalidValue), - }; + } let funding_tx_confirmed_in = Readable::read(reader)?; let funding_tx_confirmation_height = Readable::read(reader)?; @@ -5251,12 +5410,14 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel }; let mut announcement_sigs = None; + let mut target_closing_feerate_sats_per_kw = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), (1, minimum_depth, option), (3, counterparty_selected_channel_reserve_satoshis, option), (5, config, option), // Note that if none is provided we will *not* overwrite the existing one. 
(7, shutdown_scriptpubkey, option), + (9, target_closing_feerate_sats_per_kw, option), }); let mut secp_ctx = Secp256k1::new(); @@ -5305,7 +5466,10 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel #[cfg(debug_assertions)] counterparty_max_commitment_tx_output: Mutex::new((0, 0)), - last_sent_closing_fee, + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw, funding_tx_confirmed_in, funding_tx_confirmation_height, @@ -5334,6 +5498,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel commitment_secrets, channel_update_status, + closing_signed_in_flight: false, announcement_sigs, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 38e2172e..ac2297f8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -276,6 +276,10 @@ impl MsgHandleErrInternal { fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { + ChannelError::Warn(msg) => LightningError { + err: msg, + action: msgs::ErrorAction::IgnoreError, + }, ChannelError::Ignore(msg) => LightningError { err: msg, action: msgs::ErrorAction::IgnoreError, @@ -819,6 +823,11 @@ macro_rules! handle_error { macro_rules! convert_chan_err { ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { match $err { + ChannelError::Warn(msg) => { + //TODO: Once warning messages are merged, we should send a `warning` message to our + //peer here. + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) + }, ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, @@ -1275,12 +1284,7 @@ impl ChannelMana self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } - /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs - /// will be accepted on the given channel, and after additional timeout/the closing of all - /// pending HTLCs, the channel will be closed on chain. - /// - /// May generate a SendShutdown message event on success, which should be relayed. - pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let counterparty_node_id; @@ -1296,7 +1300,7 @@ impl ChannelMana Some(peer_state) => { let peer_state = peer_state.lock().unwrap(); let their_features = &peer_state.latest_features; - chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features)? + chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? }, None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), }; @@ -1341,6 +1345,50 @@ impl ChannelMana Ok(()) } + /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. 
+ /// + /// * If we are the channel initiator, we will pay between our [`Background`] and + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate. + /// * If our counterparty is the channel initiator, we will require a channel closing + /// transaction feerate of at least our [`Background`] feerate or the feerate which + /// would appear on a force-closure transaction, whichever is lower. We will allow our + /// counterparty to pay as much fee as they'd like, however. + /// + /// May generate a SendShutdown message event on success, which should be relayed. + /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + self.close_channel_internal(channel_id, None) + } + + /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. + /// + /// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated + /// the channel being closed or not: + /// * If we are the channel initiator, we will pay at least this feerate on the closing + /// transaction. The upper-bound is set by + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). + /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure + /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which + /// will appear on a force-closure transaction, whichever is lower). + /// + /// May generate a SendShutdown message event on success, which should be relayed. + /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> { + self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight)) + } + #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; @@ -2322,9 +2370,9 @@ impl ChannelMana // close channel and then send error message to peer. 
let counterparty_node_id = chan.get().get_counterparty_node_id(); let err: Result<(), _> = match e { - ChannelError::Ignore(_) => { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { panic!("Stated return value requirements in send_commitment() were not met"); - }, + } ChannelError::Close(msg) => { log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); let (channel_id, mut channel) = chan.remove_entry(); @@ -2667,6 +2715,20 @@ impl ChannelMana let pending_msg_events = &mut channel_state.pending_msg_events; let short_to_id = &mut channel_state.short_to_id; channel_state.by_id.retain(|chan_id, chan| { + let counterparty_node_id = chan.get_counterparty_node_id(); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, counterparty_node_id)); + } + if !retain_channel { return false; } + + if let Err(e) = chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + handle_errors.push((Err(err), chan.get_counterparty_node_id())); + if needs_close { return false; } + } + match chan.channel_update_status() { ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), @@ -2693,20 +2755,13 @@ impl ChannelMana _ => {}, } - let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push((err, counterparty_node_id)); - } - retain_channel + true }); } for (err, counterparty_node_id) in handle_errors.drain(..) { let _ = handle_error!(self, err, counterparty_node_id); } - should_persist }); } @@ -3357,7 +3412,13 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (shutdown, closing_signed, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &self.keys_manager, &their_features, &msg), channel_state, chan_entry); + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } + + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); dropped_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. @@ -3378,13 +3439,6 @@ impl ChannelMana msg, }); } - if let Some(msg) = closing_signed { - // TODO: Do not send this if the monitor update failed. 
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: *counterparty_node_id, - msg, - }); - } break Ok(()); }, @@ -3570,8 +3624,8 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) = - match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) { + let (revoke_and_ack, commitment_signed, monitor_update) = + match chan.get_mut().commitment_signed(&msg, &self.logger) { Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); @@ -3583,7 +3637,6 @@ impl ChannelMana }; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); - //TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), @@ -3602,12 +3655,6 @@ impl ChannelMana }, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } Ok(()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3663,12 +3710,12 @@ impl ChannelMana break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) = - break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan); + let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) = + break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); htlcs_to_fail = htlcs_to_fail_in; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { - assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); + assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); } else { if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) { @@ -3682,12 +3729,6 @@ impl ChannelMana updates, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap())) }, hash_map::Entry::Vacant(_) => break 
Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3931,7 +3972,7 @@ impl ChannelMana }); } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); + let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); for (failures, channel_id) in failed_htlcs.drain(..) { self.fail_holding_cell_htlcs(failures, channel_id); } @@ -3943,6 +3984,63 @@ impl ChannelMana has_update } + /// Check whether any channels have finished removing all pending updates after a shutdown + /// exchange and can now send a closing_signed. + /// Returns whether any closing_signed messages were generated. + fn maybe_generate_initial_closing_signed(&self) -> bool { + let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); + let mut has_update = false; + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let by_id = &mut channel_state.by_id; + let short_to_id = &mut channel_state.short_to_id; + let pending_msg_events = &mut channel_state.pending_msg_events; + + by_id.retain(|channel_id, chan| { + match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { + Ok((msg_opt, tx_opt)) => { + if let Some(msg) = msg_opt { + has_update = true; + pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: chan.get_counterparty_node_id(), msg, + }); + } + if let Some(tx) = tx_opt { + // We're done with this channel. We got a closing_signed and sent back + // a closing_signed with a closing transaction to broadcast. + if let Some(short_id) = chan.get_short_channel_id() { + short_to_id.remove(&short_id); + } + + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); + self.tx_broadcaster.broadcast_transaction(&tx); + false + } else { true } + }, + Err(e) => { + has_update = true; + let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + !close_channel + } + } + }); + } + + for (counterparty_node_id, err) in handle_errors.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); + } + + has_update + } + /// Handle a list of channel failures during a block_connected or block_disconnected call, /// pushing the channel monitor update (if any) to the background events queue and removing the /// Channel object. @@ -4096,6 +4194,9 @@ impl MessageSend if self.check_free_holding_cells() { result = NotifyOption::DoPersist; } + if self.maybe_generate_initial_closing_signed() { + result = NotifyOption::DoPersist; + } let mut pending_events = Vec::new(); let mut channel_state = self.channel_state.lock().unwrap(); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6c08065b..3b0a8452 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -416,12 +416,22 @@ macro_rules! get_htlc_update_msgs { } } +#[cfg(test)] +macro_rules! get_channel_ref { + ($node: expr, $lock: ident, $channel_id: expr) => { + { + $lock = $node.node.channel_state.lock().unwrap(); + $lock.by_id.get_mut(&$channel_id).unwrap() + } + } +} + #[cfg(test)] macro_rules! 
get_feerate { ($node: expr, $channel_id: expr) => { { - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&$channel_id).unwrap(); + let mut lock; + let chan = get_channel_ref!($node, lock, $channel_id); chan.get_feerate() } } @@ -755,7 +765,7 @@ macro_rules! check_closed_broadcast { pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) { let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) }; - let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) }; + let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) }; let (tx_a, tx_b); node_a.close_channel(channel_id).unwrap(); @@ -788,29 +798,33 @@ pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: let (as_update, bs_update) = if close_inbound_first { assert!(node_a.get_and_clear_pending_msg_events().is_empty()); node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()); - assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); - tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); - let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); - node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()); - let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); - assert!(none_b.is_none()); + node_b.handle_closing_signed(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id())); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); + let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + + node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()); + let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); + assert!(none_a.is_none()); + assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); + tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); (as_update, bs_update) } else { let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()); node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a); - assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); - tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); - let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + node_a.handle_closing_signed(&node_b.get_our_node_id(), &get_event_msg!(struct_b, MessageSendEvent::SendClosingSigned, node_a.get_our_node_id())); - node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()); - let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); - assert!(none_a.is_none()); 
assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); + let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); + + node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()); + let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + assert!(none_b.is_none()); + assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); + tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); (as_update, bs_update) }; assert_eq!(tx_a, tx_b); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 1a20d86f..980c6ea4 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -28,10 +28,8 @@ use routing::network_graph::RoutingFees; use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction}; -use ln::script::ShutdownScript; use util::enforcing_trait_impls::EnforcingSigner; use util::{byte_utils, test_utils}; -use util::test_utils::OnGetShutdownScriptpubkey; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; use util::errors::APIError; use util::ser::{Writeable, ReadableArgs}; @@ -57,12 +55,10 @@ use io; use prelude::*; use alloc::collections::BTreeSet; use core::default::Default; -use core::num::NonZeroU8; use sync::{Arc, Mutex}; use ln::functional_test_utils::*; use ln::chan_utils::CommitmentTransaction; -use ln::msgs::OptionalField::Present; #[test] fn test_insane_channel_opens() { @@ -857,354 +853,6 @@ fn test_update_fee() { close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); } -#[test] -fn pre_funding_lock_shutdown_test() { - // Test sending a shutdown prior to funding_locked after funding generation - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known()); - mine_transaction(&nodes[0], &tx); - mine_transaction(&nodes[1], &tx); - - nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap(); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - - let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); - - 
assert!(nodes[0].node.list_channels().is_empty()); - assert!(nodes[1].node.list_channels().is_empty()); -} - -#[test] -fn updates_shutdown_wait() { - // Test sending a shutdown with outstanding updates pending - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); - - nodes[0].node.close_channel(&chan_1.2).unwrap(); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]); - - let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler; - let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler; - let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); - let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); - unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); - unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); - - assert!(nodes[2].node.claim_funds(our_payment_preimage)); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - expect_payment_forwarded!(nodes[1], Some(1000), false); - check_added_monitors!(nodes[1], 1); - let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - - assert!(updates_2.update_add_htlcs.is_empty()); - assert!(updates_2.update_fail_htlcs.is_empty()); - assert!(updates_2.update_fail_malformed_htlcs.is_empty()); - assert!(updates_2.update_fee.is_none()); - assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); - 
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(our_payment_preimage, *payment_preimage); - }, - _ => panic!("Unexpected event"), - } - - let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); - - assert!(nodes[0].node.list_channels().is_empty()); - - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); - assert!(nodes[1].node.list_channels().is_empty()); - assert!(nodes[2].node.list_channels().is_empty()); -} - -#[test] -fn htlc_fail_async_shutdown() { - // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); - nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert_eq!(updates.update_add_htlcs.len(), 1); - assert!(updates.update_fulfill_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - - nodes[1].node.close_channel(&chan_1.2).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); - check_added_monitors!(nodes[1], 
1); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); - - let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(updates_2.update_add_htlcs.is_empty()); - assert!(updates_2.update_fulfill_htlcs.is_empty()); - assert_eq!(updates_2.update_fail_htlcs.len(), 1); - assert!(updates_2.update_fail_malformed_htlcs.is_empty()); - assert!(updates_2.update_fee.is_none()); - - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - - expect_payment_failed!(nodes[0], our_payment_hash, false); - - let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2); - let node_0_closing_signed = match msg_events[0] { - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - (*msg).clone() - }, - _ => panic!("Unexpected event"), - }; - match msg_events[1] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); - }, - _ => panic!("Unexpected event"), - } - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); - - assert!(nodes[0].node.list_channels().is_empty()); - - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); - assert!(nodes[1].node.list_channels().is_empty()); - assert!(nodes[2].node.list_channels().is_empty()); -} - -fn do_test_shutdown_rebroadcast(recv_count: u8) { - // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of - // messages delivered prior to disconnect - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); - - nodes[1].node.close_channel(&chan_1.2).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - if recv_count > 0 { - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - if recv_count > 1 { - 
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - } - } - - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish); - let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - assert!(node_1_shutdown == node_1_2nd_shutdown); - - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish); - let node_0_2nd_shutdown = if recv_count > 0 { - let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); - node_0_2nd_shutdown - } else { - let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); - assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected. - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); - get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) - }; - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown); - - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - assert!(nodes[2].node.claim_funds(our_payment_preimage)); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - expect_payment_forwarded!(nodes[1], Some(1000), false); - check_added_monitors!(nodes[1], 1); - let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - - assert!(updates_2.update_add_htlcs.is_empty()); - assert!(updates_2.update_fail_htlcs.is_empty()); - assert!(updates_2.update_fail_malformed_htlcs.is_empty()); - assert!(updates_2.update_fee.is_none()); - assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - 
Event::PaymentSent { ref payment_preimage } => { - assert_eq!(our_payment_preimage, *payment_preimage); - }, - _ => panic!("Unexpected event"), - } - - let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - if recv_count > 0 { - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - assert!(node_1_closing_signed.is_some()); - } - - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - if recv_count == 0 { - // If all closing_signeds weren't delivered we can just resume where we left off... - let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); - let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - assert!(node_0_2nd_shutdown == node_0_3rd_shutdown); - - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); - let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - assert!(node_1_3rd_shutdown == node_1_2nd_shutdown); - - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown); - let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - assert!(node_0_closing_signed == node_0_2nd_closing_signed); - - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); - } else { - // If one node, however, received + responded with an identical closing_signed we end - // up erroring and node[0] will try to broadcast its own latest commitment transaction. - // There isn't really anything better we can do simply, but in the future we might - // explore storing a set of recently-closed channels that got disconnected during - // closing_signed and avoiding broadcasting local commitment txn for some timeout to - // give our counterparty enough time to (potentially) broadcast a cooperative closing - // transaction. 
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] { - match action { - &ErrorAction::SendErrorMessage { ref msg } => { - nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg); - assert_eq!(msg.channel_id, chan_1.2); - }, - _ => panic!("Unexpected event!"), - } - } else { panic!("Needed SendErrorMessage close"); } - - // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and - // checks it, but in this case nodes[0] didn't ever get a chance to receive a - // closing_signed so we do it ourselves - check_closed_broadcast!(nodes[0], false); - check_added_monitors!(nodes[0], 1); - } - - assert!(nodes[0].node.list_channels().is_empty()); - - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); - assert!(nodes[1].node.list_channels().is_empty()); - assert!(nodes[2].node.list_channels().is_empty()); -} - -#[test] -fn test_shutdown_rebroadcast() { - do_test_shutdown_rebroadcast(0); - do_test_shutdown_rebroadcast(1); - do_test_shutdown_rebroadcast(2); -} - #[test] fn fake_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -7484,323 +7132,6 @@ fn test_sweep_outbound_htlc_failure_update() { do_test_sweep_outbound_htlc_failure_update(true, false); } -#[test] -fn test_upfront_shutdown_script() { - // BOLT 2 : Option upfront shutdown script, if peer commit its closing_script at channel opening - // enforce it at shutdown message - - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - // We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign - let flags = InitFeatures::known(); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - // Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that we disconnect peer - nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str())); - check_added_monitors!(nodes[2], 1); - - // We test that in case of peer committing upfront to a 
script, if it doesn't change at closing, we sign - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); - // We test that in case of peer committing upfront to a script, if it oesn't change at closing, we sign - nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let events = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - - // We test that if case of peer non-signaling we don't enforce committed script at channel opening - let flags_no = InitFeatures::known().clear_upfront_shutdown_script(); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone()); - nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - check_added_monitors!(nodes[1], 1); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - - // We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close - // channel smoothly, opt-out is from channel initiator here - let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - - //// We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close - //// channel smoothly - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. 
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - match events[1] { - MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_unsupported_anysegwit_upfront_shutdown_script() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - // Use a non-v0 segwit script supported by option_shutdown_anysegwit - let node_features = InitFeatures::known().clear_shutdown_anysegwit(); - let anysegwit_shutdown_script = Builder::new() - .push_int(16) - .push_slice(&[0, 40]) - .into_script(); - - // Check script when handling an open_channel message - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); - let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - open_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), node_features.clone(), &open_channel); - - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[0].node.get_our_node_id()); - assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); - }, - _ => panic!("Unexpected event"), - } - - // Check script when handling an accept_channel message - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); - let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - accept_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); - nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), node_features, &accept_channel); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); - }, - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_invalid_upfront_shutdown_script() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); - - // Use a segwit v0 script with an unsupported witness program - let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - 
open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(0) - .push_slice(&[0, 0]) - .into_script()); - nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[0].node.get_our_node_id()); - assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_0 OP_PUSHBYTES_2 0000)"); - }, - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_segwit_v0_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a segwit v0 script supported even without option_shutdown_anysegwit - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_int(0) - .push_slice(&[0; 20]) - .into_script(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - match events[1] { - MessageSendEvent::SendClosingSigned { node_id, .. 
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_anysegwit_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a non-v0 segwit script supported by option_shutdown_anysegwit - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_int(16) - .push_slice(&[0, 0]) - .into_script(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - match events[1] { - MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_unsupported_anysegwit_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - node_cfgs[0].features = InitFeatures::known().clear_shutdown_anysegwit(); - node_cfgs[1].features = InitFeatures::known().clear_shutdown_anysegwit(); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - // Check that using an unsupported shutdown script fails and a supported one succeeds. 
- let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey(); - let unsupported_shutdown_script = - ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap(); - chanmon_cfgs[1].keys_manager - .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() }) - .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script }); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, node_cfgs[0].features.clone(), node_cfgs[1].features.clone()); - match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()) { - Err(APIError::IncompatibleShutdownScript { script }) => { - assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner()); - }, - Err(e) => panic!("Unexpected error: {:?}", e), - Ok(_) => panic!("Expected error"), - } - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = unsupported_shutdown_script.into_inner(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_cfgs[1].features, &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[1] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020028) from remote peer".to_owned()); - }, - _ => panic!("Unexpected event"), - } - check_added_monitors!(nodes[0], 1); -} - -#[test] -fn test_invalid_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a segwit v0 script with an unsupported witness program - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_int(0) - .push_slice(&[0, 0]) - .into_script(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[1] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned()) - }, - _ => panic!("Unexpected event"), - } - check_added_monitors!(nodes[0], 1); -} - #[test] fn 
test_user_configurable_csv_delay() { // We test our channel constructors yield errors when we pass them absurd csv delay @@ -9023,7 +8354,7 @@ fn test_update_err_monitor_lockdown() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { - if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) { + if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); } } else { assert!(false); } @@ -9117,7 +8448,7 @@ fn test_concurrent_monitor_claim() { assert_eq!(updates.update_add_htlcs.len(), 1); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { - if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) { + if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update if let Err(_) = watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 5576d8bd..d265888e 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -53,6 +53,7 @@ mod functional_tests; #[allow(unused_mut)] mod chanmon_update_fail_tests; #[cfg(test)] +#[allow(unused_mut)] mod reorg_tests; #[cfg(test)] #[allow(unused_mut)] @@ -60,6 +61,9 @@ mod onion_route_tests; #[cfg(test)] #[allow(unused_mut)] mod monitor_tests; +#[cfg(test)] +#[allow(unused_mut)] +mod shutdown_tests; pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 0042cf51..f94909a9 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -226,6 +226,19 @@ pub struct Shutdown { pub scriptpubkey: Script, } +/// The minimum and maximum fees which the sender is willing to place on the closing transaction. +/// This is provided in [`ClosingSigned`] by both sides to indicate the fee range they are willing +/// to use. +#[derive(Clone, Debug, PartialEq)] +pub struct ClosingSignedFeeRange { + /// The minimum absolute fee, in satoshis, which the sender is willing to place on the closing + /// transaction. + pub min_fee_satoshis: u64, + /// The maximum absolute fee, in satoshis, which the sender is willing to place on the closing + /// transaction. + pub max_fee_satoshis: u64, +} + /// A closing_signed message to be sent or received from a peer #[derive(Clone, Debug, PartialEq)] pub struct ClosingSigned { @@ -235,6 +248,9 @@ pub struct ClosingSigned { pub fee_satoshis: u64, /// A signature on the closing transaction pub signature: Signature, + /// The minimum and maximum fees which the sender is willing to accept, provided only by new + /// nodes. 
+	pub fee_range: Option<ClosingSignedFeeRange>,
 }
 
 /// An update_add_htlc message to be sent or received from a peer
@@ -1103,10 +1119,35 @@ impl Readable for ChannelReestablish{
 	}
 }
 
-impl_writeable!(ClosingSigned, 32+8+64, {
-	channel_id,
-	fee_satoshis,
-	signature
+impl Writeable for ClosingSigned {
+	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+		w.size_hint(32 + 8 + 64 + if self.fee_range.is_some() { 1+1+ 2*8 } else { 0 });
+		self.channel_id.write(w)?;
+		self.fee_satoshis.write(w)?;
+		self.signature.write(w)?;
+		encode_tlv_stream!(w, {
+			(1, self.fee_range, option),
+		});
+		Ok(())
+	}
+}
+
+impl Readable for ClosingSigned {
+	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+		let channel_id = Readable::read(r)?;
+		let fee_satoshis = Readable::read(r)?;
+		let signature = Readable::read(r)?;
+		let mut fee_range = None;
+		decode_tlv_stream!(r, {
+			(1, fee_range, option),
+		});
+		Ok(Self { channel_id, fee_satoshis, signature, fee_range })
+	}
+}
+
+impl_writeable!(ClosingSignedFeeRange, 2*8, {
+	min_fee_satoshis,
+	max_fee_satoshis
 });
 
 impl_writeable_len_match!(CommitmentSigned, {
@@ -2323,10 +2364,27 @@ mod tests {
 			channel_id: [2; 32],
 			fee_satoshis: 2316138423780173,
 			signature: sig_1,
+			fee_range: None,
 		};
 		let encoded_value = closing_signed.encode();
 		let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
 		assert_eq!(encoded_value, target_value);
+		assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value)).unwrap(), closing_signed);
+
+		let closing_signed_with_range = msgs::ClosingSigned {
+			channel_id: [2; 32],
+			fee_satoshis: 2316138423780173,
+			signature: sig_1,
+			fee_range: Some(msgs::ClosingSignedFeeRange {
+				min_fee_satoshis: 0xdeadbeef,
+				max_fee_satoshis: 0x1badcafe01234567,
+			}),
+		};
+		let encoded_value_with_range = closing_signed_with_range.encode();
+		let target_value_with_range = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a011000000000deadbeef1badcafe01234567").unwrap();
+		assert_eq!(encoded_value_with_range, target_value_with_range);
+		assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value_with_range)).unwrap(),
+			closing_signed_with_range);
 	}
 
 	#[test]
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
new file mode 100644
index 00000000..a40ad237
--- /dev/null
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -0,0 +1,928 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Tests of our shutdown and closing_signed negotiation logic.
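+//!
+//! [Editor's note] Illustrative sketch, not part of the original patch: the
+//! negotiation exercised below uses the new optional `fee_range` TLV on
+//! `ClosingSigned` (added in msgs.rs above). A message carrying a fee range
+//! might be built as follows, where `sig` is an assumed signature binding and
+//! the 253/5000 sat bounds are arbitrary example values (both absolute fees):
+//!
+//! ```ignore
+//! let closing = msgs::ClosingSigned {
+//!     channel_id: [2; 32],
+//!     fee_satoshis: 1000,
+//!     signature: sig,
+//!     fee_range: Some(msgs::ClosingSignedFeeRange {
+//!         min_fee_satoshis: 253,  // lowest total closing fee we would accept
+//!         max_fee_satoshis: 5000, // highest total closing fee we would accept
+//!     }),
+//! };
+//! ```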
+ +use chain::keysinterface::KeysInterface; +use chain::transaction::OutPoint; +use ln::{PaymentPreimage, PaymentHash}; +use ln::channelmanager::PaymentSendFailure; +use routing::router::get_route; +use ln::features::{InitFeatures, InvoiceFeatures}; +use ln::msgs; +use ln::msgs::{ChannelMessageHandler, ErrorAction}; +use ln::script::ShutdownScript; +use util::test_utils; +use util::test_utils::OnGetShutdownScriptpubkey; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; +use util::errors::APIError; +use util::config::UserConfig; + +use bitcoin::blockdata::script::Builder; +use bitcoin::blockdata::opcodes; + +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; + +use regex; + +use core::default::Default; +use core::num::NonZeroU8; + +use ln::functional_test_utils::*; +use ln::msgs::OptionalField::Present; + +#[test] +fn pre_funding_lock_shutdown_test() { + // Test sending a shutdown prior to funding_locked after funding generation + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known()); + mine_transaction(&nodes[0], &tx); + mine_transaction(&nodes[1], &tx); + + nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + assert!(nodes[1].node.list_channels().is_empty()); +} + +#[test] +fn updates_shutdown_wait() { + // Test sending a shutdown with outstanding updates pending + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + let (our_payment_preimage, _, _) = 
route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + + nodes[0].node.close_channel(&chan_1.2).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]); + + let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler; + let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler; + let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); + unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[1], Some(1000), false); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, 
nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); +} + +#[test] +fn htlc_fail_async_shutdown() { + // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + + nodes[1].node.close_channel(&chan_1.2).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); + + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fulfill_htlcs.is_empty()); + assert_eq!(updates_2.update_fail_htlcs.len(), 1); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + + 
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + expect_payment_failed!(nodes[0], our_payment_hash, false); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + match msg_events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { + assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); + }, + _ => panic!("Unexpected event"), + } + let node_0_closing_signed = match msg_events[1] { + MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }; + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); +} + +fn do_test_shutdown_rebroadcast(recv_count: u8) { + // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of + // messages delivered prior to disconnect + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + + let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + + nodes[1].node.close_channel(&chan_1.2).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + if recv_count > 0 { + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + if recv_count > 1 { + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { 
features: InitFeatures::empty() }); + let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish); + let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + assert!(node_1_shutdown == node_1_2nd_shutdown); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish); + let node_0_2nd_shutdown = if recv_count > 0 { + let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); + node_0_2nd_shutdown + } else { + let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected. + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); + get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) + }; + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[1], Some(1000), false); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + if recv_count > 0 { + 
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_2nd_closing_signed.is_some()); + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + if recv_count == 0 { + // If all closing_signeds weren't delivered we can just resume where we left off... + let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); + let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(node_0_msgs.len(), 2); + let node_0_2nd_closing_signed = match node_0_msgs[1] { + MessageSendEvent::SendClosingSigned { ref msg, .. } => { + assert_eq!(node_0_closing_signed, *msg); + msg.clone() + }, + _ => panic!(), + }; + + let node_0_3rd_shutdown = match node_0_msgs[0] { + MessageSendEvent::SendShutdown { ref msg, .. } => { + assert_eq!(node_0_2nd_shutdown, *msg); + msg.clone() + }, + _ => panic!(), + }; + assert!(node_0_2nd_shutdown == node_0_3rd_shutdown); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); + let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + assert!(node_1_3rd_shutdown == node_1_2nd_shutdown); + + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown); + + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + } else { + // If one node, however, received + responded with an identical closing_signed we end + // up erroring and node[0] will try to broadcast its own latest commitment transaction. 
+ // There isn't really anything better we can do simply, but in the future we might + // explore storing a set of recently-closed channels that got disconnected during + // closing_signed and avoiding broadcasting local commitment txn for some timeout to + // give our counterparty enough time to (potentially) broadcast a cooperative closing + // transaction. + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &msg); + assert_eq!(msg.channel_id, chan_1.2); + }, + _ => panic!("Unexpected event!"), + } + } else { panic!("Needed SendErrorMessage close"); } + + // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and + // checks it, but in this case nodes[1] didn't ever get a chance to receive a + // closing_signed so we do it ourselves + check_closed_broadcast!(nodes[1], false); + check_added_monitors!(nodes[1], 1); + } + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); +} + +#[test] +fn test_shutdown_rebroadcast() { + do_test_shutdown_rebroadcast(0); + do_test_shutdown_rebroadcast(1); + do_test_shutdown_rebroadcast(2); +} + +#[test] +fn test_upfront_shutdown_script() { + // BOLT 2: option upfront shutdown script. If a peer commits to its closing script at channel opening, + // enforce it when handling the shutdown message + + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // We test that when a peer commits upfront to a script, if the script changes at closing, we refuse to sign + let flags = InitFeatures::known(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); + // Test that we enforce the upfront scriptpubkey: providing a different one at closing makes us disconnect the peer + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2],
true).unwrap().data.as_str())); + check_added_monitors!(nodes[2], 1); + + // We test that when a peer commits upfront to a script, if the script doesn't change at closing, we sign + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); + // We test that when a peer commits upfront to a script, if the script doesn't change at closing, we sign + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + + // We test that in case of a non-signaling peer we don't enforce the committed script at channel opening + let flags_no = InitFeatures::known().clear_upfront_shutdown_script(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone()); + nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + check_added_monitors!(nodes[1], 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + + // We test that if the user opts out, we provide a zero-length script at channel opening and are able to close the + // channel smoothly; here the opt-out is from the channel initiator + let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::SendShutdown { node_id, ..
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + + // We test that if the user opts out, we provide a zero-length script at channel opening and are able to close the + // channel smoothly + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_unsupported_anysegwit_upfront_shutdown_script() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Use a non-v0 segwit script supported by option_shutdown_anysegwit + let node_features = InitFeatures::known().clear_shutdown_anysegwit(); + let anysegwit_shutdown_script = Builder::new() + .push_int(16) + .push_slice(&[0, 40]) + .into_script(); + + // Check script when handling an open_channel message + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), node_features.clone(), &open_channel); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); + }, + _ => panic!("Unexpected event"), + } + + // Check script when handling an accept_channel message + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + accept_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), node_features, &accept_channel); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage {
ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); + }, + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_invalid_upfront_shutdown_script() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + + // Use a segwit v0 script with an unsupported witness program + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(0) + .push_slice(&[0, 0]) + .into_script()); + nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_0 OP_PUSHBYTES_2 0000)"); + }, + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_segwit_v0_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a segwit v0 script supported even without option_shutdown_anysegwit + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_int(0) + .push_slice(&[0; 20]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::SendClosingSigned { node_id, .. 
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_anysegwit_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a non-v0 segwit script supported by option_shutdown_anysegwit + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_int(16) + .push_slice(&[0, 0]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_unsupported_anysegwit_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + node_cfgs[0].features = InitFeatures::known().clear_shutdown_anysegwit(); + node_cfgs[1].features = InitFeatures::known().clear_shutdown_anysegwit(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // Check that using an unsupported shutdown script fails and a supported one succeeds. 
+ let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey(); + let unsupported_shutdown_script = + ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap(); + chanmon_cfgs[1].keys_manager + .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() }) + .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script }); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, node_cfgs[0].features.clone(), node_cfgs[1].features.clone()); + match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()) { + Err(APIError::IncompatibleShutdownScript { script }) => { + assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner()); + }, + Err(e) => panic!("Unexpected error: {:?}", e), + Ok(_) => panic!("Expected error"), + } + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = unsupported_shutdown_script.into_inner(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_cfgs[1].features, &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[1] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020028) from remote peer".to_owned()); + }, + _ => panic!("Unexpected event"), + } + check_added_monitors!(nodes[0], 1); +} + +#[test] +fn test_invalid_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a segwit v0 script with an unsupported witness program + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_int(0) + .push_slice(&[0, 0]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[1] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned()) + }, + _ => panic!("Unexpected event"), + } + check_added_monitors!(nodes[0], 1); +} + +#[derive(PartialEq)] +enum 
TimeoutStep { + AfterShutdown, + AfterClosingSigned, + NoTimeout, +} + +fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { + // The range-based closing signed negotiation allows the funder to restart the process with a + // new range if the previous range did not overlap. This allows implementations to request user + // intervention allowing users to enter a new fee range. We do not implement the sending side + // of this, instead opting to allow users to enter an explicit "willing to pay up to X to avoid + // force-closing" value and relying on that instead. + // + // Here we test the fundee side of that restart mechanism, implementing the funder side of + // it manually. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; + + send_payment(&nodes[0], &[&nodes[1]], 8_000_000); + + nodes[0].node.close_channel(&chan_id).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + + { + // Now we set nodes[1] to require a relatively high feerate for closing. This should result + // in it rejecting nodes[0]'s initial closing_signed, giving nodes[0] a chance to try + // again. + let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock *= 10; + } + + let mut node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + // nodes[0] should use a "reasonable" feerate, well under the 10 sat/vByte that nodes[1] thinks + // is the current prevailing feerate. + assert!(node_0_closing_signed.fee_satoshis <= 500); + + if timeout_step != TimeoutStep::AfterShutdown { + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + // At this point nodes[1] should send back a warning message indicating it disagrees with the + // given channel-closing fee. Currently we do not implement warning messages so instead we + // remain silent here. + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Now deliver a mutated closing_signed indicating a higher acceptable fee range, which + // nodes[1] should happily accept and respond to.
+ node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10; + { + let mut lock; + get_channel_ref!(nodes[0], lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10; + } + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + if timeout_step == TimeoutStep::NoTimeout { + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap()); + } + } + + if timeout_step != TimeoutStep::NoTimeout { + assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); + } else { + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + } + + nodes[1].node.timer_tick_occurred(); + nodes[1].node.timer_tick_occurred(); + + let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(txn.len(), 1); + assert_eq!(txn[0].output.len(), 2); + + if timeout_step != TimeoutStep::NoTimeout { + assert!((txn[0].output[0].script_pubkey.is_v0_p2wpkh() && + txn[0].output[1].script_pubkey.is_v0_p2wsh()) || + (txn[0].output[1].script_pubkey.is_v0_p2wpkh() && + txn[0].output[0].script_pubkey.is_v0_p2wsh())); + check_closed_broadcast!(nodes[1], true); + check_added_monitors!(nodes[1], 1); + } else { + assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh()); + assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh()); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + }, + _ => panic!("Unexpected event"), + } + } +} + +#[test] +fn test_closing_signed_reinit_timeout() { + do_test_closing_signed_reinit_timeout(TimeoutStep::AfterShutdown); + do_test_closing_signed_reinit_timeout(TimeoutStep::AfterClosingSigned); + do_test_closing_signed_reinit_timeout(TimeoutStep::NoTimeout); +} + +fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) { + // A simple test of the legacy shutdown fee negotiation logic. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + + if high_initiator_fee { + // If high_initiator_fee is set, set nodes[0]'s feerate significantly higher. This + // shouldn't impact the flow at all given nodes[1] will happily accept the higher fee.
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock *= 10; + } + + nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + + let mut node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + node_0_closing_signed.fee_range = None; + if high_initiator_fee { + assert!(node_0_closing_signed.fee_satoshis > 500); + } else { + assert!(node_0_closing_signed.fee_satoshis < 500); + } + + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let (_, mut node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + node_1_closing_signed.as_mut().unwrap().fee_range = None; + + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); +} + +#[test] +fn simple_legacy_shutdown_test() { + do_simple_legacy_shutdown_test(false); + do_simple_legacy_shutdown_test(true); +} + +#[test] +fn simple_target_feerate_shutdown() { + // Simple test of target in `close_channel_with_target_feerate`. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(); + + nodes[0].node.close_channel_with_target_feerate(&chan_id, 253 * 10).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.close_channel_with_target_feerate(&chan_id, 253 * 5).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let (_, node_1_closing_signed_opt) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + let node_1_closing_signed = node_1_closing_signed_opt.unwrap(); + + // nodes[1] was passed a target which was larger than the current channel feerate, which it + // should ignore in favor of the channel fee, as there is no use demanding a minimum higher + // than what will be paid on a force-close transaction. Note that we have to consider rounding, + // so only check that we're within 10 sats. 
+ assert!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis >= + node_1_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis * 10 - 5); + assert!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis <= + node_1_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis * 10 + 5); + + // Further, because nodes[0]'s target fee is larger than the `Normal` fee estimation plus our + // force-closure-avoidance buffer, min should equal max, and the nodes[1]-selected fee should + // be nodes[0]'s only available fee. + assert_eq!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis, + node_0_closing_signed.fee_range.as_ref().unwrap().max_fee_satoshis); + assert_eq!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis, + node_0_closing_signed.fee_satoshis); + assert_eq!(node_0_closing_signed.fee_satoshis, node_1_closing_signed.fee_satoshis); + + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); +} diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index 3348b7b2..fb4f6fce 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -223,6 +223,29 @@ pub struct ChannelConfig { /// /// Default value: 5_000_000 msat. pub max_dust_htlc_exposure_msat: u64, + /// The additional fee we're willing to pay to avoid waiting for the counterparty's + /// `to_self_delay` to reclaim funds. + /// + /// When we close a channel cooperatively with our counterparty, we negotiate a fee for the + /// closing transaction which both sides find acceptable, ultimately paid by the channel + /// funder/initiator. + /// + /// When we are the funder, because we have to pay the channel closing fee, we bound the + /// acceptable fee by our [`Background`] and [`Normal`] fees, with the upper bound increased by + /// this value. Because the on-chain fee we'd pay to force-close the channel is kept near our + /// [`Normal`] feerate during normal operation, this value represents the additional fee we're + /// willing to pay in order to avoid waiting for our counterparty's to_self_delay to reclaim our + /// funds. + /// + /// When we are not the funder, we require that the closing transaction fee be at least our + /// [`Background`] fee estimate, but allow our counterparty to pay as much fee as they like. + /// Thus, this value is ignored when we are not the funder. + /// + /// Default value: 1000 satoshis.
+ /// + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + pub force_close_avoidance_max_fee_satoshis: u64, } impl Default for ChannelConfig { @@ -235,6 +258,7 @@ impl Default for ChannelConfig { announced_channel: false, commit_upfront_shutdown_pubkey: true, max_dust_htlc_exposure_msat: 5_000_000, + force_close_avoidance_max_fee_satoshis: 1000, } } } @@ -243,6 +267,7 @@ impl_writeable_tlv_based!(ChannelConfig, { (0, forwarding_fee_proportional_millionths, required), (1, max_dust_htlc_exposure_msat, (default_value, 5_000_000)), (2, cltv_expiry_delta, required), + (3, force_close_avoidance_max_fee_satoshis, (default_value, 1000)), (4, announced_channel, required), (6, commit_upfront_shutdown_pubkey, required), (8, forwarding_fee_base_msat, required), diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index af1bebe6..5178732c 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -168,6 +168,7 @@ macro_rules! decode_tlv_stream { ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { { use ln::msgs::DecodeError; let mut last_seen_type: Option<u64> = None; + let mut stream_ref = $stream; 'tlv_read: loop { use util::ser; @@ -177,7 +178,7 @@ macro_rules! decode_tlv_stream { // determine whether we should break or return ShortRead if we get an // UnexpectedEof. This should in every case be largely cosmetic, but it's nice to // pass the TLV test vectors exactly, which require this distinction. - let mut tracking_reader = ser::ReadTrackingReader::new($stream); + let mut tracking_reader = ser::ReadTrackingReader::new(&mut stream_ref); match ser::Readable::read(&mut tracking_reader) { Err(DecodeError::ShortRead) => { if !tracking_reader.have_read { @@ -205,8 +206,8 @@ macro_rules! decode_tlv_stream { last_seen_type = Some(typ.0); // Finally, read the length and value itself: - let length: ser::BigSize = ser::Readable::read($stream)?; - let mut s = ser::FixedLengthReader::new($stream, length.0); + let length: ser::BigSize = ser::Readable::read(&mut stream_ref)?; + let mut s = ser::FixedLengthReader::new(&mut stream_ref, length.0); match typ.0 { $($type => { decode_tlv!(s, $field, $fieldty);
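Aside: for downstream users, the new ChannelConfig knob and the target-feerate close API exercised in the tests above compose straightforwardly. A minimal sketch, assuming the 0.0.100 API surface from this patch (the 50_000-sat value is purely illustrative):

use lightning::util::config::UserConfig;

fn main() {
    // Raise the force-close-avoidance budget above the 1000-sat default. Per the new
    // ChannelConfig docs above, when we are the funder this widens the upper bound of
    // the closing fee we will accept before giving up on a cooperative close.
    let mut config = UserConfig::default();
    config.channel_options.force_close_avoidance_max_fee_satoshis = 50_000; // illustrative

    // `config` would then be passed to ChannelManager construction. Alternatively, a
    // feerate can be pinned for a single close via
    // ChannelManager::close_channel_with_target_feerate, as exercised by
    // simple_target_feerate_shutdown above.
    let _ = config;
}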