-# 0.0.100 - WIP
+# 0.0.100 - 2021-08-17
+
+## API Updates
+ * The `lightning` crate can now be built in no_std mode, making it easy for
+ Rust users to target embedded hardware. Note that mutexes are replaced with
+ no-ops for such builds (#1008, #1028).
+ * LDK now supports sending and receiving "keysend" payments. This includes
+ modifications to `lightning::util::events::Event::PaymentReceived` to
+ indicate the type of payment (#967).
+ * A new variant, `lightning::util::events::Event::PaymentForwarded`, has been
+ added which indicates a forwarded payment has been successfully claimed and
+ we've received a forwarding fee (#1004).
+ * `lightning::chain::keysinterface::KeysInterface::get_shutdown_pubkey` has
+ been renamed to `get_shutdown_scriptpubkey`, returns a script, and is now
+ called on channel open only if
+ `lightning::util::config::ChannelConfig::commit_upfront_shutdown_pubkey` is
+ set (#1019).
+ * Closing-signed negotiation is now more configurable, with an explicit
+ `lightning::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis`
+ field allowing you to select the maximum amount you are willing to pay to
+ avoid a force-closure. Further, we are now less restrictive on the fee
+ placed on the closing transaction when we are not the party paying it. To
+ control the feerate paid on a channel at close-time, use
+ `ChannelManager::close_channel_with_target_feerate` instead of
+ `close_channel` (#1011; see the sketch after this list).
+ * `lightning_background_processor::BackgroundProcessor` now stops the
+ background thread when dropped (#1007). It is marked `#[must_use]` so that
+ Rust users will receive a compile-time warning when it is immediately
+ dropped after construction (#1029).
+ * Total potential funds burn on force-close due to dust outputs is now limited
+ to `lightning::util::config::ChannelConfig::max_dust_htlc_exposure_msat` per
+ channel (#1009).
+ * The interval on which
+ `lightning::ln::peer_handler::PeerManager::timer_tick_occurred` should be
+ called has been reduced to once every five seconds (#1035) and
+ `lightning::ln::channelmanager::ChannelManager::timer_tick_occurred` should
+ now be called on startup in addition to once per minute (#985).
+ * The rust-bitcoin and bech32 dependencies have been updated to their
+ respective latest versions (0.27 and 0.8, #1012).
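+
+As a rough sketch of the two new `ChannelConfig` knobs and the new close API
+named above (not code from this release: `channel_manager` and `channel_id`
+are assumed to already exist in your application, and error handling is
+elided):
+
+```rust
+use lightning::util::config::UserConfig;
+
+let mut config = UserConfig::default();
+// Pay at most 1000 extra satoshis to avoid a force-closure during
+// closing-signed negotiation (#1011).
+config.channel_options.force_close_avoidance_max_fee_satoshis = 1000;
+// Cap potential dust-HTLC funds burn at 5,000,000 msat per channel (#1009).
+config.channel_options.max_dust_htlc_exposure_msat = 5_000_000;
+
+// When closing, pick the feerate (in sats per 1000 weight units) paid on the
+// closing transaction rather than calling `close_channel` (#1011).
+channel_manager.close_channel_with_target_feerate(&channel_id, 253)?;
+```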
+
+## Bug Fixes
+ * Fix panic when reading invoices generated by some versions of c-lightning
+ (#1002 and #1003).
+ * Fix panic when attempting to validate a signed message of incorrect length
+ (#1010).
+ * Route hints in invoices are no longer ignored when the invoice is over
+ 250k sats (#986).
+ * Fees are automatically updated on outbound channels to ensure commitment
+ transactions are always broadcastable (#985).
+ * Fixes a rare case where a `lightning::util::events::Event::SpendableOutputs`
+ event is not generated after a counterparty commitment transaction is
+ confirmed in a reorg when a conflicting local commitment transaction is
+ removed in the same reorg (#1022).
+ * Fixes a remotely-triggerable force-closure of an origin channel after an
+ HTLC was forwarded over a next-hop channel and the next-hop channel was
+ force-closed by our counterparty (#1025).
+ * Fixes a rare force-closure case when a payment sent as a channel fundee
+ would overdraw our remaining balance; the send now fails instead (#998).
+ * Fixes a rare force-closure case when a payment was claimed prior to a
+ peer disconnection or restart, and later failed (#977).
## Serialization Compatibility
+ * Pending inbound keysend payments which have neither been failed nor claimed
+ when serialized will result in a `ChannelManager` which is not readable on
+ pre-0.0.100 clients (#967).
+ * Because
+ `lightning::chain::keysinterface::KeysInterface::get_shutdown_scriptpubkey`
+ has been updated to return a script instead of only a `PublicKey`,
+ `ChannelManager`s constructed with custom `KeysInterface` implementations on
+ 0.0.100 and later versions will not be readable on previous versions.
+ `ChannelManager`s created with 0.0.99 and prior versions will remain readable
+ even after a serialization roundtrip on 0.0.100, as long as no new
+ channels are opened. Further, users using a
+ `lightning::chain::keysinterface::KeysManager` as their `KeysInterface` will
+ have `ChannelManager`s which are readable on prior versions as well (#1019).
+ * `ChannelMonitorUpdate`s created by 0.0.100 and later for channels when
+ `lightning::util::config::ChannelConfig::commit_upfront_shutdown_pubkey` is
+ not set may not be readable by versions prior to 0.0.100 (#1019).
* HTLCs which were in the process of being claimed on-chain when a pre-0.0.100
`ChannelMonitor` was serialized may generate `PaymentForwarded` events with
spurious `fee_earned_msat` values. This only applies to payments which were
- unresolved at the time of the upgrade.
- * 0.0.100 clients with pending PaymentForwarded events at serialization-time
- will generate serialized `ChannelManager` objects which 0.0.99 and earlier
- clients cannot read. The likelihood of this can be reduced by ensuring you
- process all pending events immediately before serialization (as is done by
- the `lightning-background-processor` crate).
+ unresolved at the time of the upgrade (#1004).
+ * 0.0.100 clients with pending `Event::PaymentForwarded` events at
+ serialization-time will generate serialized `ChannelManager` objects which
+ 0.0.99 and earlier clients cannot read. The likelihood of this can be reduced
+ by ensuring you process all pending events immediately before serialization
+ (as is done by the `lightning-background-processor` crate, #1004; see the
+ sketch below).
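+
+A minimal sketch of the event-draining advice above (`event_handler` is any
+`lightning::util::events::EventHandler` and `write_manager_to_disk` is a
+placeholder for your own persistence routine):
+
+```rust
+// Drain all pending events (including any Event::PaymentForwarded) first so
+// that the serialized ChannelManager remains readable by 0.0.99 and earlier.
+channel_manager.process_pending_events(&event_handler);
+// Only then write the manager out, e.g. via its Writeable implementation.
+write_manager_to_disk(&channel_manager)?;
+```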
+
+In total, this release features 59 files changed, 5861 insertions, and 2082
+deletions in 95 commits from 6 authors.
# 0.0.99 - 2021-07-09
cargo check
cargo doc
cargo doc --document-private-items
-cd fuzz && cargo check --features=stdin_fuzz
+cd fuzz && RUSTFLAGS="--cfg=fuzzing" cargo check --features=stdin_fuzz
cd ../lightning && cargo check --no-default-features --features=no-std
use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
+use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
+use lightning::ln::script::ShutdownScript;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER};
use lightning::util::errors::APIError;
use lightning::util::events;
use bitcoin::secp256k1::Secp256k1;
use std::mem;
-use std::cmp::Ordering;
+use std::cmp::{self, Ordering};
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;
-struct FuzzEstimator {}
+const MAX_FEE: u32 = 10_000;
+struct FuzzEstimator {
+ ret_val: atomic::AtomicU32,
+}
impl FeeEstimator for FuzzEstimator {
- fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
- 253
+ fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
+ // We force-close channels if our counterparty sends us a feerate which is a small multiple
+ // of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
+ // always return a HighPriority feerate here which is >= the maximum Normal feerate and a
+ // Background feerate which is <= the minimum Normal feerate.
+ match conf_target {
+ ConfirmationTarget::HighPriority => MAX_FEE,
+ ConfirmationTarget::Background => 253,
+ ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
+ }
}
}
};
let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
- deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
+ deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
let mut ser = VecWriter(Vec::new());
deserialized_monitor.write(&mut ser).unwrap();
map_entry.insert((update.update_id, ser.0));
Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
}
- fn get_shutdown_pubkey(&self) -> PublicKey {
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
let secp_ctx = Secp256k1::signing_only();
- PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
+ let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap();
+ let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
+ ShutdownScript::new_p2wpkh(&pubkey_hash)
}
fn get_channel_signer(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner {
_ if err.starts_with("Cannot send value that would put counterparty balance under holder-announced channel reserve value") => {},
_ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
_ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
+ _ if err.starts_with("Cannot send value that would put our exposure to dust HTLCs at") => {},
_ => panic!("{}", err),
}
},
APIError::MonitorUpdateFailed => {
// We can (obviously) temp-fail a monitor update
},
+ APIError::IncompatibleShutdownScript { .. } => panic!("Cannot send an incompatible shutdown script"),
}
}
#[inline]
#[inline]
pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
- let fee_est = Arc::new(FuzzEstimator{});
let broadcast = Arc::new(TestBroadcaster{});
macro_rules! make_node {
- ($node_id: expr) => { {
+ ($node_id: expr, $fee_estimator: expr) => { {
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), revoked_commitments: Mutex::new(HashMap::new()) });
- let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager)));
+ let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager)));
let mut config = UserConfig::default();
config.channel_options.forwarding_fee_proportional_millionths = 0;
network,
best_block: BestBlock::from_genesis(network),
};
- (ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
+ (ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
monitor, keys_manager)
} }
}
macro_rules! reload_node {
- ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { {
+ ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
let keys_manager = Arc::clone(& $keys_manager);
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager)));
+ let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager)));
let mut config = UserConfig::default();
config.channel_options.forwarding_fee_proportional_millionths = 0;
let read_args = ChannelManagerReadArgs {
keys_manager,
- fee_estimator: fee_est.clone(),
+ fee_estimator: $fee_estimator.clone(),
chain_monitor: chain_monitor.clone(),
tx_broadcaster: broadcast.clone(),
logger,
let mut channel_txn = Vec::new();
macro_rules! make_channel {
($source: expr, $dest: expr, $chan_id: expr) => { {
+ $source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known() });
+ $dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known() });
+
$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
let open_channel = {
let events = $source.get_and_clear_pending_msg_events();
} }
}
+ let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+ let mut last_htlc_clear_fee_a = 253;
+ let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+ let mut last_htlc_clear_fee_b = 253;
+ let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+ let mut last_htlc_clear_fee_c = 253;
+
// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
// forwarding.
- let (node_a, mut monitor_a, keys_manager_a) = make_node!(0);
- let (node_b, mut monitor_b, keys_manager_b) = make_node!(1);
- let (node_c, mut monitor_c, keys_manager_c) = make_node!(2);
+ let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a);
+ let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b);
+ let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c);
let mut nodes = [node_a, node_b, node_c];
had_events = true;
match event {
events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
- for dest in nodes.iter() {
+ for (idx, dest) in nodes.iter().enumerate() {
if dest.get_our_node_id() == node_id {
- assert!(update_fee.is_none());
for update_add in update_add_htlcs.iter() {
+ out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
if !$corrupt_forward {
dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
} else {
}
}
for update_fulfill in update_fulfill_htlcs.iter() {
+ out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
}
for update_fail in update_fail_htlcs.iter() {
+ out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
}
for update_fail_malformed in update_fail_malformed_htlcs.iter() {
+ out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
}
+ if let Some(msg) = update_fee {
+ out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
+ dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
+ }
let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
if $limit_events != ProcessMessages::AllMessages && processed_change {
} });
break;
}
+ out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
break;
}
}
},
events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- for dest in nodes.iter() {
+ for (idx, dest) in nodes.iter().enumerate() {
if dest.get_our_node_id() == *node_id {
+ out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
}
}
},
events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
- for dest in nodes.iter() {
+ for (idx, dest) in nodes.iter().enumerate() {
if dest.get_our_node_id() == *node_id {
+ out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
}
}
} }
}
- match get_slice!(1)[0] {
+ let v = get_slice!(1)[0];
+ out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes());
+ match v {
// In general, we keep related message groups close together in binary form, allowing
// bit-twiddling mutations to have similar effects. This is probably overkill, but no
// harm in doing so.
node_a_ser.0.clear();
nodes[0].write(&mut node_a_ser).unwrap();
}
- let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a);
+ let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
nodes[0] = new_node_a;
monitor_a = new_monitor_a;
},
bc_events.clear();
cb_events.clear();
}
- let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b);
+ let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
nodes[1] = new_node_b;
monitor_b = new_monitor_b;
},
node_c_ser.0.clear();
nodes[2].write(&mut node_c_ser).unwrap();
}
- let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c);
+ let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
nodes[2] = new_node_c;
monitor_c = new_monitor_c;
},
0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },
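+ // 0x80/0x84/0x88 bump node A/B/C's Normal feerate estimate by 250 sat/kW,
+ // saturating at FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE times the feerate at
+ // the last HTLC-clearing point, then let the node update its channel fees.
+ // 0x81/0x85/0x89 reset the estimate to the 253 sat/kW floor.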
+ 0x80 => {
+ let max_feerate = last_htlc_clear_fee_a * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+ if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+ fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release);
+ }
+ nodes[0].maybe_update_chan_fees();
+ },
+ 0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },
+
+ 0x84 => {
+ let max_feerate = last_htlc_clear_fee_b * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+ if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+ fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release);
+ }
+ nodes[1].maybe_update_chan_fees();
+ },
+ 0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },
+
+ 0x88 => {
+ let max_feerate = last_htlc_clear_fee_c * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+ if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+ fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release);
+ }
+ nodes[2].maybe_update_chan_fees();
+ },
+ 0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
+
0xff => {
// Test that no channel is in a stuck state where neither party can send funds even
// after we resolve all pending events.
assert!(
send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) ||
send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id));
+
+ last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
+ last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
+ last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire);
},
_ => test_return!(),
}
use lightning::ln::channelmanager::{ChainParameters, ChannelManager};
use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor};
use lightning::ln::msgs::DecodeError;
+use lightning::ln::script::ShutdownScript;
use lightning::routing::router::get_route;
use lightning::routing::network_graph::NetGraphMsgHandler;
use lightning::util::config::UserConfig;
Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
}
- fn get_shutdown_pubkey(&self) -> PublicKey {
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
let secp_ctx = Secp256k1::signing_only();
- PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]).unwrap())
+ let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]).unwrap();
+ let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
+ ShutdownScript::new_p2wpkh(&pubkey_hash)
}
fn get_channel_signer(&self, inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner {
GEN_TEST AcceptChannel test_msg ""
GEN_TEST AnnouncementSignatures test_msg ""
GEN_TEST ChannelReestablish test_msg ""
-GEN_TEST ClosingSigned test_msg ""
GEN_TEST CommitmentSigned test_msg ""
GEN_TEST DecodedOnionErrorPacket test_msg ""
GEN_TEST FundingCreated test_msg ""
GEN_TEST ErrorMessage test_msg_hole ", 32, 2"
GEN_TEST ChannelUpdate test_msg_hole ", 108, 1"
+GEN_TEST ClosingSigned test_msg_simple ""
GEN_TEST Init test_msg_simple ""
GEN_TEST OnionHopData test_msg_simple ""
GEN_TEST Ping test_msg_simple ""
pub mod msg_accept_channel;
pub mod msg_announcement_signatures;
pub mod msg_channel_reestablish;
-pub mod msg_closing_signed;
pub mod msg_commitment_signed;
pub mod msg_decoded_onion_error_packet;
pub mod msg_funding_created;
pub mod msg_update_add_htlc;
pub mod msg_error_message;
pub mod msg_channel_update;
+pub mod msg_closing_signed;
pub mod msg_init;
pub mod msg_onion_hop_data;
pub mod msg_ping;
#[inline]
pub fn msg_closing_signed_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
- test_msg!(msgs::ClosingSigned, data);
+ test_msg_simple!(msgs::ClosingSigned, data);
}
#[no_mangle]
pub extern "C" fn msg_closing_signed_run(data: *const u8, datalen: usize) {
let data = unsafe { std::slice::from_raw_parts(data, datalen) };
- test_msg!(msgs::ClosingSigned, data);
+ test_msg_simple!(msgs::ClosingSigned, data);
}
[package]
name = "lightning-background-processor"
-version = "0.0.99"
+version = "0.0.100"
authors = ["Valentine Wallace <vwallace@protonmail.com>"]
license = "MIT OR Apache-2.0"
repository = "http://github.com/rust-bitcoin/rust-lightning"
[dependencies]
bitcoin = "0.27"
-lightning = { version = "0.0.99", path = "../lightning", features = ["allow_wallclock_use"] }
-lightning-persister = { version = "0.0.99", path = "../lightning-persister" }
+lightning = { version = "0.0.100", path = "../lightning", features = ["allow_wallclock_use"] }
+lightning-persister = { version = "0.0.100", path = "../lightning-persister" }
[dev-dependencies]
-lightning = { version = "0.0.99", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] }
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;
+#[cfg(not(debug_assertions))]
+const PING_TIMER: u64 = 5;
+/// Signature operations take a lot longer without compiler optimisations.
+/// Increasing the ping timer allows for this but slower devices will be disconnected if the
+/// timeout is reached.
+#[cfg(debug_assertions)]
+const PING_TIMER: u64 = 30;
+
/// Trait which handles persisting a [`ChannelManager`] to disk.
///
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
let stop_thread = Arc::new(AtomicBool::new(false));
let stop_thread_clone = stop_thread.clone();
let handle = thread::spawn(move || -> Result<(), std::io::Error> {
- let mut current_time = Instant::now();
+ log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup");
+ channel_manager.timer_tick_occurred();
+
+ let mut last_freshness_call = Instant::now();
+ let mut last_ping_call = Instant::now();
loop {
peer_manager.process_events();
channel_manager.process_pending_events(&event_handler);
log_trace!(logger, "Terminating background processor.");
return Ok(());
}
- if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
- log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
+ if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
+ log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
channel_manager.timer_tick_occurred();
+ last_freshness_call = Instant::now();
+ }
+ if last_ping_call.elapsed().as_secs() > PING_TIMER * 2 {
+ // On various platforms, we may be starved of CPU cycles for several reasons.
+ // E.g. on iOS, if we've been in the background, we will be entirely paused.
+ // Similarly, if we're on a desktop platform and the device has been asleep, we
+ // may not get any cycles.
+ // In any case, if we've been entirely paused for more than double our ping
+ // timer, we should have disconnected all sockets by now (and they're probably
+ // dead anyway), so disconnect them by calling `timer_tick_occurred()` twice.
+ log_trace!(logger, "Awoke after more than double our ping timer, disconnecting peers.");
+ peer_manager.timer_tick_occurred();
+ peer_manager.timer_tick_occurred();
+ last_ping_call = Instant::now();
+ } else if last_ping_call.elapsed().as_secs() > PING_TIMER {
+ log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
peer_manager.timer_tick_occurred();
- current_time = Instant::now();
+ last_ping_call = Instant::now();
}
}
});
use lightning::get_event_msg;
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
use lightning::ln::features::InitFeatures;
- use lightning::ln::msgs::ChannelMessageHandler;
+ use lightning::ln::msgs::{ChannelMessageHandler, Init};
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor};
use lightning::util::config::UserConfig;
use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
let node = Node { node: manager, peer_manager, chain_monitor, persister, tx_broadcaster, logger, best_block };
nodes.push(node);
}
+
+ for i in 0..num_nodes {
+ for j in (i+1)..num_nodes {
+ nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known() });
+ nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known() });
+ }
+ }
+
nodes
}
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
loop {
let log_entries = nodes[0].logger.lines.lock().unwrap();
- let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
- if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
+ let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
+ let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
+ if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
+ log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
break
}
}
[package]
name = "lightning-block-sync"
-version = "0.0.99"
+version = "0.0.100"
authors = ["Jeffrey Czyz", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "http://github.com/rust-bitcoin/rust-lightning"
[dependencies]
bitcoin = "0.27"
-lightning = { version = "0.0.99", path = "../lightning" }
+lightning = { version = "0.0.100", path = "../lightning" }
tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
serde = { version = "1.0", features = ["derive"], optional = true }
serde_json = { version = "1.0", optional = true }
[package]
name = "lightning-invoice"
description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.7.0"
+version = "0.8.0"
authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
documentation = "https://docs.rs/lightning-invoice/"
license = "MIT OR Apache-2.0"
[dependencies]
bech32 = "0.8"
-lightning = { version = "0.0.99", path = "../lightning" }
+lightning = { version = "0.0.100", path = "../lightning" }
secp256k1 = { version = "0.20", features = ["recovery"] }
num-traits = "0.2.8"
bitcoin_hashes = "0.10"
[dev-dependencies]
-lightning = { version = "0.0.99", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] }
[package]
name = "lightning-net-tokio"
-version = "0.0.99"
+version = "0.0.100"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-bitcoin/rust-lightning/"
[dependencies]
bitcoin = "0.27"
-lightning = { version = "0.0.99", path = "../lightning" }
+lightning = { version = "0.0.100", path = "../lightning" }
tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
[dev-dependencies]
PeerDisconnected
}
let disconnect_type = loop {
- macro_rules! shutdown_socket {
- ($err: expr, $need_disconnect: expr) => { {
- println!("Disconnecting peer due to {}!", $err);
- break $need_disconnect;
- } }
- }
-
let read_paused = {
let us_lock = us.lock().unwrap();
if us_lock.rl_requested_disconnect {
- shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
+ break Disconnect::CloseConnection;
}
us_lock.read_paused
};
tokio::select! {
v = write_avail_receiver.recv() => {
assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
- if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
- shutdown_socket!(e, Disconnect::CloseConnection);
+ if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
+ break Disconnect::CloseConnection;
}
},
_ = read_wake_receiver.recv() => {},
read = reader.read(&mut buf), if !read_paused => match read {
- Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected),
+ Ok(0) => break Disconnect::PeerDisconnected,
Ok(len) => {
let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
let mut us_lock = us.lock().unwrap();
us_lock.read_paused = true;
}
},
- Err(e) => shutdown_socket!(e, Disconnect::CloseConnection),
+ Err(_) => break Disconnect::CloseConnection,
}
},
- Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected),
+ Err(_) => break Disconnect::PeerDisconnected,
},
}
peer_manager.process_events();
[package]
name = "lightning-persister"
-version = "0.0.99"
+version = "0.0.100"
authors = ["Valentine Wallace", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-bitcoin/rust-lightning/"
[dependencies]
bitcoin = "0.27"
-lightning = { version = "0.0.99", path = "../lightning" }
+lightning = { version = "0.0.100", path = "../lightning" }
libc = "0.2"
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3", features = ["winbase"] }
[dev-dependencies]
-lightning = { version = "0.0.99", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] }
[package]
name = "lightning"
-version = "0.0.99"
+version = "0.0.100"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-bitcoin/rust-lightning/"
/// An enum that represents the speed at which we want a transaction to confirm used for feerate
/// estimation.
+#[derive(Clone, Copy, PartialEq, Eq)]
pub enum ConfirmationTarget {
/// We are happy with this transaction confirming slowly when feerate drops some.
Background,
fn block_connected(&self, block: &Block, height: u32) {
let header = &block.header;
let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+ log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
self.process_chain_data(header, &txdata, |monitor, txdata| {
monitor.block_connected(
header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
let monitors = self.monitors.read().unwrap();
+ log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
for monitor in monitors.values() {
monitor.block_disconnected(
header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
P::Target: channelmonitor::Persist<ChannelSigner>,
{
fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
self.process_chain_data(header, txdata, |monitor, txdata| {
monitor.transactions_confirmed(
header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
}
fn transaction_unconfirmed(&self, txid: &Txid) {
+ log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
let monitors = self.monitors.read().unwrap();
for monitor in monitors.values() {
monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
}
fn best_block_updated(&self, header: &BlockHeader, height: u32) {
+ log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
self.process_chain_data(header, &[], |monitor, txdata| {
// While in practice there shouldn't be any recursive calls when given empty txdata,
// it's still possible if a chain::Filter implementation returns a transaction.
let len: u64 = Readable::read(r)?;
let mut updates = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<ChannelMonitorUpdateStep>()));
for _ in 0..len {
- updates.push(Readable::read(r)?);
+ if let Some(upd) = MaybeReadable::read(r)? {
+ updates.push(upd);
+ }
}
read_tlv_fields!(r, {});
Ok(Self { update_id, updates })
},
}
-impl_writeable_tlv_based!(OnchainEventEntry, {
- (0, txid, required),
- (2, height, required),
- (4, event, required),
-});
+impl Writeable for OnchainEventEntry {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ write_tlv_fields!(writer, {
+ (0, self.txid, required),
+ (2, self.height, required),
+ (4, self.event, required),
+ });
+ Ok(())
+ }
+}
-impl_writeable_tlv_based_enum!(OnchainEvent,
+impl MaybeReadable for OnchainEventEntry {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+ let mut txid = Default::default();
+ let mut height = 0;
+ let mut event = None;
+ read_tlv_fields!(reader, {
+ (0, txid, required),
+ (2, height, required),
+ (4, event, ignorable),
+ });
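+ // `ignorable` reads the event via `MaybeReadable`: an event of an unknown
+ // (odd) TLV type comes back as `None`, in which case we drop the entire
+ // entry rather than failing to deserialize the monitor.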
+ if let Some(ev) = event {
+ Ok(Some(Self { txid, height, event: ev }))
+ } else {
+ Ok(None)
+ }
+ }
+}
+
+impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
(0, HTLCUpdate) => {
(0, source, required),
(1, onchain_value_satoshis, option),
(1, MaturingOutput) => {
(0, descriptor, required),
},
-;);
+);
#[cfg_attr(any(test, feature = "fuzztarget", feature = "_test_utils"), derive(PartialEq))]
#[derive(Clone)]
/// think we've fallen behind!
should_broadcast: bool,
},
+ ShutdownScript {
+ scriptpubkey: Script,
+ },
}
-impl_writeable_tlv_based_enum!(ChannelMonitorUpdateStep,
+impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
(0, LatestHolderCommitmentTXInfo) => {
(0, commitment_tx, required),
(2, htlc_outputs, vec_type),
(4, ChannelForceClosed) => {
(0, should_broadcast, required),
},
-;);
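+ // `ShutdownScript` uses an odd variant number so that, with the upgradable
+ // format, readers which do not understand it can ignore it; pre-0.0.100
+ // readers use the non-upgradable format and may fail to read updates
+ // containing it (#1019).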
+ (5, ShutdownScript) => {
+ (0, scriptpubkey, required),
+ },
+);
/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
/// on-chain transactions to ensure no loss of funds occurs.
destination_script: Script,
broadcasted_holder_revokable_script: Option<(Script, PublicKey, PublicKey)>,
counterparty_payment_script: Script,
- shutdown_script: Script,
+ shutdown_script: Option<Script>,
channel_keys_id: [u8; 32],
holder_revocation_basepoint: PublicKey,
on_holder_tx_csv: u16,
commitment_secrets: CounterpartyCommitmentSecrets,
+ /// The set of outpoints in each counterparty commitment transaction. We always need at least
+ /// the payment hash from `HTLCOutputInCommitment` to claim even a revoked commitment
+ /// transaction broadcast as we need to be able to construct the witness script in all cases.
counterparty_claimable_outpoints: HashMap<Txid, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
/// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
/// Nor can we figure out their commitment numbers without the commitment transaction they are
}
self.counterparty_payment_script.write(writer)?;
- self.shutdown_script.write(writer)?;
+ match &self.shutdown_script {
+ Some(script) => script.write(writer)?,
+ None => Script::new().write(writer)?,
+ }
self.channel_keys_id.write(writer)?;
self.holder_revocation_basepoint.write(writer)?;
}
impl<Signer: Sign> ChannelMonitor<Signer> {
- pub(crate) fn new(secp_ctx: Secp256k1<secp256k1::All>, keys: Signer, shutdown_pubkey: &PublicKey,
+ pub(crate) fn new(secp_ctx: Secp256k1<secp256k1::All>, keys: Signer, shutdown_script: Option<Script>,
on_counterparty_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script),
channel_parameters: &ChannelTransactionParameters,
funding_redeemscript: Script, channel_value_satoshis: u64,
best_block: BestBlock) -> ChannelMonitor<Signer> {
assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
- let our_channel_close_key_hash = WPubkeyHash::hash(&shutdown_pubkey.serialize());
- let shutdown_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_close_key_hash[..]).into_script();
let payment_key_hash = WPubkeyHash::hash(&keys.pubkeys().payment_point.serialize());
let counterparty_payment_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_key_hash[..]).into_script();
}
}
+/// Compares a broadcasted commitment transaction's HTLCs with those in the latest state,
+/// failing any HTLCs which didn't make it into the broadcasted commitment transaction back
+/// after ANTI_REORG_DELAY blocks.
+///
+/// We always compare against the set of HTLCs in counterparty commitment transactions, as those
+/// are the commitment transactions which are generated by us. The off-chain state machine in
+/// `Channel` will automatically resolve any HTLCs which were never included in a commitment
+/// transaction when it detects channel closure, but it is up to us to ensure any HTLCs which were
+/// included in a remote commitment transaction are failed back if they are not present in the
+/// broadcasted commitment transaction.
+///
+/// Specifically, the removal process for HTLCs in `Channel` is always based on the counterparty
+/// sending a `revoke_and_ack`, which causes us to clear `prev_counterparty_commitment_txid`. Thus,
+/// as long as we examine both the current counterparty commitment transaction and, if it hasn't
+/// been revoked yet, the previous one, we will never "forget" to resolve an HTLC.
+macro_rules! fail_unbroadcast_htlcs {
+ ($self: expr, $commitment_tx_type: expr, $commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
+ macro_rules! check_htlc_fails {
+ ($txid: expr, $commitment_tx: expr) => {
+ if let Some(ref latest_outpoints) = $self.counterparty_claimable_outpoints.get($txid) {
+ for &(ref htlc, ref source_option) in latest_outpoints.iter() {
+ if let &Some(ref source) = source_option {
+ // Check if the HTLC is present in the commitment transaction that was
+ // broadcast, but not if it was below the dust limit, which we should
+ // fail backwards immediately as there is no way for us to learn the
+ // payment_preimage.
+ // Note that if the dust limit were allowed to change between
+ // commitment transactions we'd want to check whether *any*
+ // broadcastable commitment transaction has the HTLC in it, but it
+ // cannot currently change after channel initialization, so we don't
+ // need to here.
+ let confirmed_htlcs_iter: &mut Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
+ let mut matched_htlc = false;
+ for (ref broadcast_htlc, ref broadcast_source) in confirmed_htlcs_iter {
+ if broadcast_htlc.transaction_output_index.is_some() && Some(&**source) == *broadcast_source {
+ matched_htlc = true;
+ break;
+ }
+ }
+ if matched_htlc { continue; }
+ $self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
+ if entry.height != $commitment_tx_conf_height { return true; }
+ match entry.event {
+ OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
+ *update_source != **source
+ },
+ _ => true,
+ }
+ });
+ let entry = OnchainEventEntry {
+ txid: *$txid,
+ height: $commitment_tx_conf_height,
+ event: OnchainEvent::HTLCUpdate {
+ source: (**source).clone(),
+ payment_hash: htlc.payment_hash.clone(),
+ onchain_value_satoshis: Some(htlc.amount_msat / 1000),
+ },
+ };
+ log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction, waiting for confirmation (at height {})",
+ log_bytes!(htlc.payment_hash.0), $commitment_tx, $commitment_tx_type, entry.confirmation_threshold());
+ $self.onchain_events_awaiting_threshold_conf.push(entry);
+ }
+ }
+ }
+ }
+ }
+ if let Some(ref txid) = $self.current_counterparty_commitment_txid {
+ check_htlc_fails!(txid, "current");
+ }
+ if let Some(ref txid) = $self.prev_counterparty_commitment_txid {
+ check_htlc_fails!(txid, "previous");
+ }
+ } }
+}
+
impl<Signer: Sign> ChannelMonitorImpl<Signer> {
/// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither
/// needed by holder commitment transactions HTCLs nor by counterparty ones. Unless we haven't already seen
// shouldn't print the scary warning above.
log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction.");
}
- }
+ },
+ ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey } => {
+ log_trace!(logger, "Updating ChannelMonitor with shutdown script");
+ if let Some(shutdown_script) = self.shutdown_script.replace(scriptpubkey.clone()) {
+ panic!("Attempted to replace shutdown script {} with {}", shutdown_script, scriptpubkey);
+ }
+ },
}
}
self.latest_update_id = updates.update_id;
}
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
- macro_rules! check_htlc_fails {
- ($txid: expr, $commitment_tx: expr) => {
- if let Some(ref outpoints) = self.counterparty_claimable_outpoints.get($txid) {
- for &(ref htlc, ref source_option) in outpoints.iter() {
- if let &Some(ref source) = source_option {
- self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
- if entry.height != height { return true; }
- match entry.event {
- OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
- *update_source != **source
- },
- _ => true,
- }
- });
- let entry = OnchainEventEntry {
- txid: *$txid,
- height,
- event: OnchainEvent::HTLCUpdate {
- source: (**source).clone(),
- payment_hash: htlc.payment_hash.clone(),
- onchain_value_satoshis: Some(htlc.amount_msat / 1000),
- },
- };
- log_info!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of revoked counterparty commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- }
- }
- }
- if let Some(ref txid) = self.current_counterparty_commitment_txid {
- check_htlc_fails!(txid, "current");
- }
- if let Some(ref txid) = self.prev_counterparty_commitment_txid {
- check_htlc_fails!(txid, "counterparty");
- }
- // No need to check holder commitment txn, symmetric HTLCSource must be present as per-htlc data on counterparty commitment tx
+ fail_unbroadcast_htlcs!(self, "revoked counterparty", height, [].iter().map(|a| *a), logger);
}
} else if let Some(per_commitment_data) = per_commitment_option {
// While this isn't useful yet, there is a potential race where if a counterparty
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
-
- macro_rules! check_htlc_fails {
- ($txid: expr, $commitment_tx: expr, $id: tt) => {
- if let Some(ref latest_outpoints) = self.counterparty_claimable_outpoints.get($txid) {
- $id: for &(ref htlc, ref source_option) in latest_outpoints.iter() {
- if let &Some(ref source) = source_option {
- // Check if the HTLC is present in the commitment transaction that was
- // broadcast, but not if it was below the dust limit, which we should
- // fail backwards immediately as there is no way for us to learn the
- // payment_preimage.
- // Note that if the dust limit were allowed to change between
- // commitment transactions we'd want to be check whether *any*
- // broadcastable commitment transaction has the HTLC in it, but it
- // cannot currently change after channel initialization, so we don't
- // need to here.
- for &(ref broadcast_htlc, ref broadcast_source) in per_commitment_data.iter() {
- if broadcast_htlc.transaction_output_index.is_some() && Some(source) == broadcast_source.as_ref() {
- continue $id;
- }
- }
- log_trace!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of counterparty commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
- self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
- if entry.height != height { return true; }
- match entry.event {
- OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
- *update_source != **source
- },
- _ => true,
- }
- });
- self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
- txid: *$txid,
- height,
- event: OnchainEvent::HTLCUpdate {
- source: (**source).clone(),
- payment_hash: htlc.payment_hash.clone(),
- onchain_value_satoshis: Some(htlc.amount_msat / 1000),
- },
- });
- }
- }
- }
- }
- }
- if let Some(ref txid) = self.current_counterparty_commitment_txid {
- check_htlc_fails!(txid, "current", 'current_loop);
- }
- if let Some(ref txid) = self.prev_counterparty_commitment_txid {
- check_htlc_fails!(txid, "previous", 'prev_loop);
- }
+ fail_unbroadcast_htlcs!(self, "counterparty", height, per_commitment_data.iter().map(|(a, b)| (a, b.as_ref().map(|b| b.as_ref()))), logger);
let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs(commitment_number, commitment_txid, Some(tx));
for req in htlc_claim_reqs {
let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
append_onchain_update!(res, to_watch);
+ fail_unbroadcast_htlcs!(self, "latest holder", height, self.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, c)| (a, c.as_ref())), logger);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
if holder_tx.txid == commitment_txid {
is_holder_tx = true;
let res = self.get_broadcasted_holder_claims(holder_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
append_onchain_update!(res, to_watch);
- }
- }
-
- macro_rules! fail_dust_htlcs_after_threshold_conf {
- ($holder_tx: expr, $commitment_tx: expr) => {
- for &(ref htlc, _, ref source) in &$holder_tx.htlc_outputs {
- if htlc.transaction_output_index.is_none() {
- if let &Some(ref source) = source {
- self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
- if entry.height != height { return true; }
- match entry.event {
- OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
- update_source != source
- },
- _ => true,
- }
- });
- let entry = OnchainEventEntry {
- txid: commitment_txid,
- height,
- event: OnchainEvent::HTLCUpdate {
- source: source.clone(), payment_hash: htlc.payment_hash,
- onchain_value_satoshis: Some(htlc.amount_msat / 1000)
- },
- };
- log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})",
- log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
- self.onchain_events_awaiting_threshold_conf.push(entry);
- }
- }
- }
+ fail_unbroadcast_htlcs!(self, "previous holder", height, holder_tx.htlc_outputs.iter().map(|(a, _, c)| (a, c.as_ref())), logger);
}
}
if is_holder_tx {
- fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx, "latest");
- if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
- fail_dust_htlcs_after_threshold_conf!(holder_tx, "previous");
- }
}
(claim_requests, (commitment_txid, watch_outputs))
L::Target: Logger,
{
let block_hash = header.block_hash();
- log_trace!(logger, "New best block {} at height {}", block_hash, height);
self.best_block = BestBlock::new(block_hash, height);
self.transactions_confirmed(header, txdata, height, broadcaster, fee_estimator, logger)
L::Target: Logger,
{
let block_hash = header.block_hash();
- log_trace!(logger, "New best block {} at height {}", block_hash, height);
if height > self.best_block.height() {
self.best_block = BestBlock::new(block_hash, height);
}
let block_hash = header.block_hash();
- log_trace!(logger, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
let mut watch_outputs = Vec::new();
let mut claimable_outpoints = Vec::new();
F::Target: FeeEstimator,
L::Target: Logger,
{
+ log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height);
debug_assert!(self.best_block.height() >= conf_height);
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
}));
break;
}
- if outp.script_pubkey == self.shutdown_script {
+ if self.shutdown_script.as_ref() == Some(&outp.script_pubkey) {
spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
output: outp.clone(),
_ => return Err(DecodeError::InvalidValue),
};
let counterparty_payment_script = Readable::read(reader)?;
- let shutdown_script = Readable::read(reader)?;
+ let shutdown_script = {
+ let script = <Script as Readable>::read(reader)?;
+ if script.is_empty() { None } else { Some(script) }
+ };
let channel_keys_id = Readable::read(reader)?;
let holder_revocation_basepoint = Readable::read(reader)?;
let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..waiting_threshold_conf_len {
- onchain_events_awaiting_threshold_conf.push(Readable::read(reader)?);
+ if let Some(val) = MaybeReadable::read(reader)? {
+ onchain_events_awaiting_threshold_conf.push(val);
+ }
}
let outputs_to_watch_len: u64 = Readable::read(reader)?;
use ln::{PaymentPreimage, PaymentHash};
use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
+ use ln::script::ShutdownScript;
use util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
use bitcoin::secp256k1::key::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
};
// Prune with one old state and a holder commitment tx holding a few overlaps with the
// old state.
+ let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let best_block = BestBlock::from_genesis(Network::Testnet);
let monitor = ChannelMonitor::new(Secp256k1::new(), keys,
- &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()), 0, &Script::new(),
+ Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &Script::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
&channel_parameters,
Script::new(), 46, 0,
use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, HolderCommitmentTransaction, ChannelTransactionParameters, CommitmentTransaction};
use ln::msgs::UnsignedChannelAnnouncement;
+use ln::script::ShutdownScript;
use prelude::*;
use core::sync::atomic::{AtomicUsize, Ordering};
#[derive(Clone, Debug, PartialEq)]
pub enum SpendableOutputDescriptor {
/// An output to a script which was provided via KeysInterface directly, either from
- /// `get_destination_script()` or `get_shutdown_pubkey()`, thus you should already know how to
- /// spend it. No secret keys are provided as rust-lightning was never given any key.
+ /// `get_destination_script()` or `get_shutdown_scriptpubkey()`, thus you should already know
+ /// how to spend it. No secret keys are provided as rust-lightning was never given any key.
/// These may include outputs from a transaction punishing our counterparty or claiming an HTLC
/// on-chain using the payment preimage or after it has timed out.
StaticOutput {
/// This method should return a different value each time it is called, to avoid linking
/// on-chain funds across channels as controlled to the same user.
fn get_destination_script(&self) -> Script;
- /// Get a public key which we will send funds to (in the form of a P2WPKH output) when closing
- /// a channel.
+ /// Get a script pubkey which we will send funds to when closing a channel.
///
/// This method should return a different value each time it is called, to avoid linking
/// on-chain funds across channels as controlled to the same user.
- fn get_shutdown_pubkey(&self) -> PublicKey;
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript;
/// Get a new set of Sign for per-channel secrets. These MUST be unique even if you
/// restarted with some stale data!
///
self.destination_script.clone()
}
- fn get_shutdown_pubkey(&self) -> PublicKey {
- self.shutdown_pubkey.clone()
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+ ShutdownScript::new_p2wpkh_from_pubkey(self.shutdown_pubkey.clone())
}
fn get_channel_signer(&self, _inbound: bool, channel_value_satoshis: u64) -> Self::Signer {
use chain::keysinterface::{Sign, KeysInterface};
use chain::package::PackageTemplate;
use util::logger::Logger;
-use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
+use util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, VecWriter};
use util::byte_utils;
use io;
}
}
-impl_writeable_tlv_based!(OnchainEventEntry, {
- (0, txid, required),
- (2, height, required),
- (4, event, required),
-});
+impl Writeable for OnchainEventEntry {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ write_tlv_fields!(writer, {
+ (0, self.txid, required),
+ (2, self.height, required),
+ (4, self.event, required),
+ });
+ Ok(())
+ }
+}
+
+impl MaybeReadable for OnchainEventEntry {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+ let mut txid = Default::default();
+ let mut height = 0;
+ let mut event = None;
+ read_tlv_fields!(reader, {
+ (0, txid, required),
+ (2, height, required),
+ (4, event, ignorable),
+ });
+ if let Some(ev) = event {
+ Ok(Some(Self { txid, height, event: ev }))
+ } else {
+ Ok(None)
+ }
+ }
+}
-impl_writeable_tlv_based_enum!(OnchainEvent,
+impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
(0, Claim) => {
(0, claim_request, required),
},
(1, ContentiousOutpoint) => {
(0, package, required),
},
-;);
+);
impl Readable for Option<Vec<Option<(usize, Signature)>>> {
fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..waiting_threshold_conf_len {
- onchain_events_awaiting_threshold_conf.push(Readable::read(reader)?);
+ if let Some(val) = MaybeReadable::read(reader)? {
+ onchain_events_awaiting_threshold_conf.push(val);
+ }
}
read_tlv_fields!(reader, {});
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
- if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
+ if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Check that even though the persister is returning a TemporaryFailure,
// because the update is bogus, ultimately the error that's returned
// should be a PermanentFailure.
claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
}
+#[test]
+fn test_pending_update_fee_ack_on_reconnect() {
+ // In early versions of our automated fee update patch, nodes did not correctly use the
+ // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
+ // undelivered commitment_signed.
+ //
+ // B sends A new HTLC + CS, not delivered
+ // A sends B update_fee + CS
+ // B receives the CS and sends RAA, previously causing B to lock in the new feerate
+ // reconnect
+ // B resends initial CS, using the original fee
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
+
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
+ let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph.read().unwrap(),
+ &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+ nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
+
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock *= 2;
+ }
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+ let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert!(as_update_fee_msgs.update_fee.is_some());
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // bs_first_raa is not delivered until it is re-generated after reconnect
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
+ let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_resend_msgs.len(), 3);
+ if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
+ assert_eq!(*updates, bs_initial_send_msgs);
+ } else { panic!(); }
+ if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
+ assert_eq!(*msg, bs_first_raa);
+ } else { panic!(); }
+ if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
+ get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+ check_added_monitors!(nodes[1], 1);
+ let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+ check_added_monitors!(nodes[0], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000);
+
+ claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
+}
+
+fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
+ // In early versions we did not handle resending of update_fee on reconnect correctly. The
+ // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
+ // explicitly here.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ send_payment(&nodes[0], &[&nodes[1]], 1000);
+
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 20;
+ }
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+ let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert!(update_msgs.update_fee.is_some());
+ if deliver_update {
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
+ }
+
+ if parallel_updates {
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 20;
+ }
+ nodes[0].node.timer_tick_occurred();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
+ let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(as_reconnect_msgs.len(), 2);
+ if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
+ let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
+ { updates } else { panic!(); };
+ assert!(update_msgs.update_fee.is_some());
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
+ if parallel_updates {
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+ check_added_monitors!(nodes[0], 1);
+ let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
+ check_added_monitors!(nodes[0], 1);
+ let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+ let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+ check_added_monitors!(nodes[1], 1);
+ } else {
+ commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
+ }
+
+ send_payment(&nodes[0], &[&nodes[1]], 1000);
+}
+#[test]
+fn update_fee_resend_test() {
+ do_update_fee_resend_test(false, false);
+ do_update_fee_resend_test(true, false);
+ do_update_fee_resend_test(false, true);
+ do_update_fee_resend_test(true, true);
+}
+
fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
// Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
// properly free them on reconnect. We previously failed such HTLCs upon serialization, but
do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
}
+
+#[test]
+fn test_temporary_error_during_shutdown() {
+ // Test that temporary failures when updating the monitor's shutdown script delay cooperative
+ // close.
+ let mut config = test_default_channel_config();
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+ nodes[0].node.close_channel(&channel_id).unwrap();
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
+ check_added_monitors!(nodes[0], 1);
+
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = None;
+
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
+
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = None;
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
+ let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
+ let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ assert!(none_b.is_none());
+ let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+ assert_eq!(txn_a, txn_b);
+ assert_eq!(txn_a.len(), 1);
+ check_spends!(txn_a[0], funding_tx);
+}
+
+#[test]
+fn test_permanent_error_during_sending_shutdown() {
+ // Test that permanent failures when updating the monitor's shutdown script result in a force
+ // close when initiating a cooperative close.
+ let mut config = test_default_channel_config();
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));
+
+ assert!(nodes[0].node.close_channel(&channel_id).is_ok());
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 2);
+}
+
+#[test]
+fn test_permanent_error_during_handling_shutdown() {
+ // Test that permanent failures when updating the monitor's shutdown script result in a force
+ // close when handling a cooperative close.
+ let mut config = test_default_channel_config();
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));
+
+ assert!(nodes[0].node.close_channel(&channel_id).is_ok());
+ let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown);
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 2);
+}
use bitcoin::blockdata::script::{Script,Builder};
use bitcoin::blockdata::transaction::{TxIn, TxOut, Transaction, SigHashType};
-use bitcoin::blockdata::opcodes;
use bitcoin::util::bip143;
use bitcoin::consensus::encode;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
-use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
+use bitcoin::hash_types::{Txid, BlockHash};
+use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::key::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,Signature};
use bitcoin::secp256k1;
use ln::features::{ChannelFeatures, InitFeatures};
use ln::msgs;
use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
+use ln::script::ShutdownScript;
use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor};
use ln::chan_utils;
use io;
use prelude::*;
use core::{cmp,mem,fmt};
+use core::convert::TryFrom;
use core::ops::Deref;
#[cfg(any(test, feature = "fuzztarget", debug_assertions))]
use sync::Mutex;
use bitcoin::hashes::hex::ToHex;
-use bitcoin::blockdata::opcodes::all::OP_PUSHBYTES_0;
#[cfg(test)]
pub struct ChannelValueStat {
pub counterparty_dust_limit_msat: u64,
}
+#[derive(Clone, Copy, PartialEq)]
+enum FeeUpdateState {
+ // Inbound states mirroring InboundHTLCState
+ RemoteAnnounced,
+ AwaitingRemoteRevokeToAnnounce,
+	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
+ // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
+ // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
+ // the fee update anywhere, we can simply consider the fee update `Committed` immediately
+ // instead of setting it to AwaitingAnnouncedRemoteRevoke.
+
+ // Outbound state can only be `LocalAnnounced` or `Committed`
+ Outbound,
+}
+
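// An illustrative sketch (distilled from the comments above and from the
// commitment_signed/revoke_and_ack handlers later in this diff) of how an
// inbound fee update advances: commitment_signed moves RemoteAnnounced to
// AwaitingRemoteRevokeToAnnounce, and the following revoke_and_ack commits it.
fn example_fee_update_on_commitment_signed(state: FeeUpdateState) -> FeeUpdateState {
	match state {
		FeeUpdateState::RemoteAnnounced => FeeUpdateState::AwaitingRemoteRevokeToAnnounce,
		// Outbound updates (and inbound ones already advanced) instead wait for
		// the counterparty's revoke_and_ack before being committed.
		other => other,
	}
}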
enum InboundHTLCRemovalReason {
FailRelay(msgs::OnionErrorPacket),
FailMalformed(([u8; 32], u16)),
RemoteShutdownSent = 1 << 10,
/// Flag which is set on ChannelFunded or FundingSent after sending a shutdown message. At this
/// point, we may not add any new HTLCs to the channel.
- /// TODO: Investigate some kind of timeout mechanism by which point the remote end must provide
- /// us their shutdown.
LocalShutdownSent = 1 << 11,
/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
/// to drop us, but we store this anyway.
RemoteOffered,
}
+/// A struct gathering stats on pending HTLCs, on either the inbound or outbound side.
+struct HTLCStats {
+ pending_htlcs: u32,
+ pending_htlcs_value_msat: u64,
+ on_counterparty_tx_dust_exposure_msat: u64,
+ on_holder_tx_dust_exposure_msat: u64,
+}
+
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
amount_msat: u64,
DuplicateClaim {},
}
+/// If the majority of the channel's funds are to the fundee and the initiator holds only just
+/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
+/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
+/// balance but the fundee is unable to send a payment as the increase in fee more than drains
+/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
+/// To avoid this, before sending an HTLC when we are the initiator, we check that the feerate can
+/// increase by this multiple without hitting this case.
+/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
+/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
+/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
+/// leave the channel less usable as we hold a bigger reserve.
+#[cfg(fuzzing)]
+pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
+#[cfg(not(fuzzing))]
+const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
+
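// A worked sketch of what this buffer means in practice, assuming the same
// commitment-weight formula used by commitment_signed below: when we are the
// initiator, HTLC sends are checked against the commitment fee we would owe if
// the feerate had already jumped by the multiple.
fn example_buffered_commit_tx_fee_sat(feerate_per_kw: u64, num_nondust_htlcs: u64) -> u64 {
	let weight = COMMITMENT_TX_BASE_WEIGHT + num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000
}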
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
latest_monitor_update_id: u64,
holder_signer: Signer,
- shutdown_pubkey: PublicKey,
+ shutdown_scriptpubkey: Option<ShutdownScript>,
destination_script: Script,
// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
// revoke_and_ack is received and new commitment_signed is generated to be
// sent to the funder. Otherwise, the pending value is removed when receiving
// commitment_signed.
- pending_update_fee: Option<u32>,
+ pending_update_fee: Option<(u32, FeeUpdateState)>,
	// update_fee() during ChannelState::AwaitingRemoteRevoke is held in
	// holding_cell_update_fee, then moved to pending_update_fee when revoke_and_ack
	// is received. holding_cell_update_fee is updated when there are additional
/// Max to_local and to_remote outputs in a remote-generated commitment transaction
counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
- last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig)
+ last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
+ target_closing_feerate_sats_per_kw: Option<u32>,
+
+ /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
+ /// update, we need to delay processing it until later. We do that here by simply storing the
+ /// closing_signed message and handling it in `maybe_propose_closing_signed`.
+ pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
+
+ /// The minimum and maximum absolute fee we are willing to place on the closing transaction.
+ /// These are set once we reach `closing_negotiation_ready`.
+ #[cfg(test)]
+ pub(crate) closing_fee_limits: Option<(u64, u64)>,
+ #[cfg(not(test))]
+ closing_fee_limits: Option<(u64, u64)>,
/// The hash of the block in which the funding transaction was included.
funding_tx_confirmed_in: Option<BlockHash>,
commitment_secrets: CounterpartyCommitmentSecrets,
channel_update_status: ChannelUpdateStatus,
+	/// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed
+	/// does not complete within a single timer tick (one minute), we should force-close the
+	/// channel.
+ /// This prevents us from keeping unusable channels around forever if our counterparty wishes
+ /// to DoS us.
+ /// Note that this field is reset to false on deserialization to give us a chance to connect to
+ /// our peer and start the closing_signed negotiation fresh.
+ closing_signed_in_flight: bool,
/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
/// This can be used to rebroadcast the channel_announcement message later.
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
Ignore(String),
+ Warn(String),
Close(String),
CloseDelayBroadcast(String),
}
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
+ &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
&ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e)
}
}
// Constructors:
- pub fn new_outbound<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, APIError>
+ pub fn new_outbound<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, APIError>
where K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
{
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());
+ let shutdown_scriptpubkey = if config.channel_options.commit_upfront_shutdown_pubkey {
+ Some(keys_provider.get_shutdown_scriptpubkey())
+ } else { None };
+
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ }
+ }
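+	// (Illustrative note, not part of this change: this error reaches users via
+	// `ChannelManager::create_channel` as `APIError::IncompatibleShutdownScript`,
+	// at which point they may retry with a P2WPKH shutdown script or with
+	// `commit_upfront_shutdown_pubkey` disabled.)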
+
Ok(Channel {
user_id,
config: config.channel_options.clone(),
latest_monitor_update_id: 0,
holder_signer,
- shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
+ shutdown_scriptpubkey,
destination_script: keys_provider.get_destination_script(),
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
funding_tx_confirmed_in: None,
funding_tx_confirmation_height: 0,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
announcement_sigs: None,
if feerate_per_kw < lower_limit {
return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
}
- let upper_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 2;
+ // We only bound the fee updates on the upper side to prevent completely absurd feerates,
+ // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
+ // We generally don't care too much if they set the feerate to something very high, but it
+ // could result in the channel being useless due to everything being dust.
+ let upper_limit = cmp::max(250 * 25,
+ fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
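+		// (Arithmetic note: feerates here are sat per 1000 weight units and one vbyte
+		// is four weight units, so 25 sat/vByte == 25 * 1000 / 4 == 250 * 25 sat/kW.)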
if feerate_per_kw as u64 > upper_limit {
return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
}
	/// Creates a new channel from a remote side's request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
- pub fn new_from_req<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, ChannelError>
+ pub fn new_from_req<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, ChannelError>
where K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator
{
				// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
if script.len() == 0 {
None
- // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
- } else if is_unsupported_shutdown_script(&their_features, script) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: ({})", script.to_bytes().to_hex())));
} else {
- Some(script.clone())
+ match ShutdownScript::try_from((script.clone(), their_features)) {
+ Ok(shutdown_script) => Some(shutdown_script.into_inner()),
+ Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))),
+ }
}
},
			// Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
}
} else { None };
+ let shutdown_scriptpubkey = if config.channel_options.commit_upfront_shutdown_pubkey {
+ Some(keys_provider.get_shutdown_scriptpubkey())
+ } else { None };
+
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
+ }
+
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());
latest_monitor_update_id: 0,
holder_signer,
- shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
+ shutdown_scriptpubkey,
destination_script: keys_provider.get_destination_script(),
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
funding_tx_confirmed_in: None,
funding_tx_confirmation_height: 0,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
announcement_sigs: None,
/// which peer generated this transaction and "to whom" this transaction flows.
/// Returns (the transaction info, the number of HTLC outputs which were present in the
/// transaction, the list of HTLCs which were not ignored when building the transaction).
- /// Note that below-dust HTLCs are included in the third return value, but not the second, and
- /// sources are provided only for outbound HTLCs in the third return value.
+ /// Note that below-dust HTLCs are included in the fourth return value, but not the third, and
+ /// sources are provided only for outbound HTLCs in the fourth return value.
#[inline]
- fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, feerate_per_kw: u32, logger: &L) -> (CommitmentTransaction, usize, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
+ fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> (CommitmentTransaction, u32, usize, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
let mut local_htlc_total_msat = 0;
let mut value_to_self_msat_offset = 0;
+ let mut feerate_per_kw = self.feerate_per_kw;
+ if let Some((feerate, update_state)) = self.pending_update_fee {
+ if match update_state {
+ // Note that these match the inclusion criteria when scanning
+ // pending_inbound_htlcs below.
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
+ FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
+ FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
+ } {
+ feerate_per_kw = feerate;
+ }
+ }
+
log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
htlcs_included.append(&mut included_dust_htlcs);
- (tx, num_nondust_htlcs, htlcs_included)
+ (tx, feerate_per_kw, num_nondust_htlcs, htlcs_included)
}
#[inline]
fn get_closing_scriptpubkey(&self) -> Script {
- let channel_close_key_hash = WPubkeyHash::hash(&self.shutdown_pubkey.serialize());
- Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_close_key_hash[..]).into_script()
+ // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
+ // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
+		// outside of those situations will panic on the unwrap below.
+ self.shutdown_scriptpubkey.clone().unwrap().into_inner()
}
#[inline]
assert!(self.pending_inbound_htlcs.is_empty());
assert!(self.pending_outbound_htlcs.is_empty());
+ assert!(self.pending_update_fee.is_none());
let mut txouts: Vec<(TxOut, ())> = Vec::new();
let mut total_fee_satoshis = proposed_total_fee_satoshis;
}, ()));
}
+ assert!(self.shutdown_scriptpubkey.is_some());
if value_to_self as u64 > self.holder_dust_limit_satoshis {
txouts.push((TxOut {
script_pubkey: self.get_closing_scriptpubkey(),
// Message handlers:
- pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: InitFeatures) -> Result<(), ChannelError> {
+ pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: &InitFeatures) -> Result<(), ChannelError> {
// Check sanity of message fields:
if !self.is_outbound() {
return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
				// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
if script.len() == 0 {
None
- // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
- } else if is_unsupported_shutdown_script(&their_features, script) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: ({})", script.to_bytes().to_hex())));
} else {
- Some(script.clone())
+ match ShutdownScript::try_from((script.clone(), their_features)) {
+ Ok(shutdown_script) => Some(shutdown_script.into_inner()),
+ Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))),
+ }
}
},
			// Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
let funding_script = self.get_funding_redeemscript();
let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
- let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, self.feerate_per_kw, logger).0;
+ let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).0;
{
let trusted_tx = initial_commitment_tx.trust();
let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
}
let counterparty_keys = self.build_remote_transaction_keys()?;
- let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0;
+ let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0;
let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
let funding_redeemscript = self.get_funding_redeemscript();
let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
+ let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), self.holder_signer.clone(),
- &self.shutdown_pubkey, self.get_holder_selected_contest_delay(),
+ shutdown_script, self.get_holder_selected_contest_delay(),
&self.destination_script, (funding_txo, funding_txo_script.clone()),
&self.channel_transaction_parameters,
funding_redeemscript.clone(), self.channel_value_satoshis,
let funding_script = self.get_funding_redeemscript();
let counterparty_keys = self.build_remote_transaction_keys()?;
- let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0;
+ let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0;
let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
- let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, self.feerate_per_kw, logger).0;
+ let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).0;
{
let trusted_tx = initial_commitment_tx.trust();
let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
let funding_txo = self.get_funding_txo().unwrap();
let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
+ let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), self.holder_signer.clone(),
- &self.shutdown_pubkey, self.get_holder_selected_contest_delay(),
+ shutdown_script, self.get_holder_selected_contest_delay(),
&self.destination_script, (funding_txo, funding_txo_script),
&self.channel_transaction_parameters,
funding_redeemscript.clone(), self.channel_value_satoshis,
Ok(())
}
- /// Returns (inbound_htlc_count, htlc_inbound_value_msat)
- fn get_inbound_pending_htlc_stats(&self) -> (u32, u64) {
- let mut htlc_inbound_value_msat = 0;
+	/// Returns an HTLCStats about pending inbound HTLCs.
+ fn get_inbound_pending_htlc_stats(&self) -> HTLCStats {
+ let mut stats = HTLCStats {
+ pending_htlcs: self.pending_inbound_htlcs.len() as u32,
+ pending_htlcs_value_msat: 0,
+ on_counterparty_tx_dust_exposure_msat: 0,
+ on_holder_tx_dust_exposure_msat: 0,
+ };
+
+ let counterparty_dust_limit_timeout_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis;
+ let holder_dust_limit_success_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis;
for ref htlc in self.pending_inbound_htlcs.iter() {
- htlc_inbound_value_msat += htlc.amount_msat;
+ stats.pending_htlcs_value_msat += htlc.amount_msat;
+ if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
+ stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
+ stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+ }
}
- (self.pending_inbound_htlcs.len() as u32, htlc_inbound_value_msat)
+ stats
}
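	// A worked sketch of the dust thresholds above: an inbound HTLC is "dust" on
	// the counterparty's commitment transaction when it cannot pay for its
	// second-stage HTLC-timeout transaction at the buffered feerate and still
	// clear their dust limit. The helper below restates that formula standalone.
	fn example_inbound_counterparty_dust_threshold_sat(buffer_feerate_per_kw: u64, counterparty_dust_limit_sat: u64) -> u64 {
		buffer_feerate_per_kw * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty_dust_limit_sat
	}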
- /// Returns (outbound_htlc_count, htlc_outbound_value_msat) *including* pending adds in our
- /// holding cell.
- fn get_outbound_pending_htlc_stats(&self) -> (u32, u64) {
- let mut htlc_outbound_value_msat = 0;
+	/// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
+ fn get_outbound_pending_htlc_stats(&self) -> HTLCStats {
+ let mut stats = HTLCStats {
+ pending_htlcs: self.pending_outbound_htlcs.len() as u32,
+ pending_htlcs_value_msat: 0,
+ on_counterparty_tx_dust_exposure_msat: 0,
+ on_holder_tx_dust_exposure_msat: 0,
+ };
+
+ let counterparty_dust_limit_success_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis;
+ let holder_dust_limit_timeout_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis;
for ref htlc in self.pending_outbound_htlcs.iter() {
- htlc_outbound_value_msat += htlc.amount_msat;
+ stats.pending_htlcs_value_msat += htlc.amount_msat;
+ if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
+ stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
+ stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+ }
}
- let mut htlc_outbound_count = self.pending_outbound_htlcs.len();
for update in self.holding_cell_htlc_updates.iter() {
if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
- htlc_outbound_count += 1;
- htlc_outbound_value_msat += amount_msat;
+ stats.pending_htlcs += 1;
+ stats.pending_htlcs_value_msat += amount_msat;
+ if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
+ stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
+ }
+ if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
+ stats.on_holder_tx_dust_exposure_msat += amount_msat;
+ }
}
}
-
- (htlc_outbound_count as u32, htlc_outbound_value_msat)
+ stats
}
	/// Get the available (i.e. not including pending HTLCs) inbound and outbound balance in msat.
(
cmp::max(self.channel_value_satoshis as i64 * 1000
- self.value_to_self_msat as i64
- - self.get_inbound_pending_htlc_stats().1 as i64
+ - self.get_inbound_pending_htlc_stats().pending_htlcs_value_msat as i64
- Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) as i64 * 1000,
0) as u64,
cmp::max(self.value_to_self_msat as i64
- - self.get_outbound_pending_htlc_stats().1 as i64
+ - self.get_outbound_pending_htlc_stats().pending_htlcs_value_msat as i64
- self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
0) as u64
)
return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.holder_htlc_minimum_msat, msg.amount_msat)));
}
- let (inbound_htlc_count, htlc_inbound_value_msat) = self.get_inbound_pending_htlc_stats();
- if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
+ let inbound_stats = self.get_inbound_pending_htlc_stats();
+ let outbound_stats = self.get_outbound_pending_htlc_stats();
+ if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 {
return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS)));
}
let holder_max_htlc_value_in_flight_msat = Channel::<Signer>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis);
- if htlc_inbound_value_msat + msg.amount_msat > holder_max_htlc_value_in_flight_msat {
+ if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > holder_max_htlc_value_in_flight_msat {
return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", holder_max_htlc_value_in_flight_msat)));
}
// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
}
}
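+		// Enforce max_dust_htlc_exposure_msat: if accepting this HTLC would push our
+		// total dust-in-flight over the configured cap on either commitment
+		// transaction, fail it back with 0x1000|7 (BOLT 4's UPDATE|7,
+		// temporary_channel_failure) rather than closing the channel.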
+ let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+ let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+ if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+ on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ }
+
+ let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+ let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+ if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+ on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ }
+
let pending_value_to_self_msat =
- self.value_to_self_msat + htlc_inbound_value_msat - removed_outbound_total_msat;
+ self.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
let pending_remote_value_msat =
self.channel_value_satoshis * 1000 - pending_value_to_self_msat;
if pending_remote_value_msat < msg.amount_msat {
Ok(())
}
- pub fn commitment_signed<F: Deref, L: Deref>(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &F, logger: &L) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, Option<msgs::ClosingSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)>
- where F::Target: FeeEstimator,
- L::Target: Logger
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)>
+ where L::Target: Logger
{
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())));
let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number).map_err(|e| (None, e))?;
- let mut update_fee = false;
- let feerate_per_kw = if !self.is_outbound() && self.pending_update_fee.is_some() {
- update_fee = true;
- self.pending_update_fee.unwrap()
- } else {
- self.feerate_per_kw
- };
-
- let (num_htlcs, mut htlcs_cloned, commitment_tx, commitment_txid) = {
- let commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, feerate_per_kw, logger);
+ let (num_htlcs, mut htlcs_cloned, commitment_tx, commitment_txid, feerate_per_kw) = {
+ let commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger);
let commitment_txid = {
let trusted_tx = commitment_tx.0.trust();
let bitcoin_tx = trusted_tx.built_transaction();
}
bitcoin_tx.txid
};
- let htlcs_cloned: Vec<_> = commitment_tx.2.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
- (commitment_tx.1, htlcs_cloned, commitment_tx.0, commitment_txid)
+ let htlcs_cloned: Vec<_> = commitment_tx.3.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
+ (commitment_tx.2, htlcs_cloned, commitment_tx.0, commitment_txid, commitment_tx.1)
};
+ // If our counterparty updated the channel fee in this commitment transaction, check that
+ // they can actually afford the new fee now.
+ let update_fee = if let Some((_, update_state)) = self.pending_update_fee {
+ update_state == FeeUpdateState::RemoteAnnounced
+ } else { false };
+ if update_fee { debug_assert!(!self.is_outbound()); }
let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
- //If channel fee was updated by funder confirm funder can afford the new fee rate when applied to the current local commitment transaction
if update_fee {
let counterparty_reserve_we_require = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
if self.channel_value_satoshis - self.value_to_self_msat / 1000 < total_fee + counterparty_reserve_we_require {
// Update state now that we've passed all the can-fail calls...
let mut need_commitment = false;
- if !self.is_outbound() {
- if let Some(fee_update) = self.pending_update_fee {
- self.feerate_per_kw = fee_update;
- // We later use the presence of pending_update_fee to indicate we should generate a
- // commitment_signed upon receipt of revoke_and_ack, so we can only set it to None
- // if we're not awaiting a revoke (ie will send a commitment_signed now).
- if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) == 0 {
- need_commitment = true;
- self.pending_update_fee = None;
- }
+ if let &mut Some((_, ref mut update_state)) = &mut self.pending_update_fee {
+ if *update_state == FeeUpdateState::RemoteAnnounced {
+ *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
+ need_commitment = true;
}
}
}
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
log_bytes!(self.channel_id));
- // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and
- // re-send the message on restoration)
return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
}
- let (commitment_signed, closing_signed) = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
// we'll send one right away when we get the revoke_and_ack when we
// free_holding_cell_htlcs().
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- (Some(msg), None)
- } else if !need_commitment {
- (None, self.maybe_propose_first_closing_signed(fee_estimator))
- } else { (None, None) };
+ Some(msg)
+ } else { None };
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });
channel_id: self.channel_id,
per_commitment_secret,
next_per_commitment_point,
- }, commitment_signed, closing_signed, monitor_update))
+ }, commitment_signed, monitor_update))
}
/// Public version of the below, checking relevant preconditions first.
if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() {
return Ok((None, htlcs_to_fail));
}
- let update_fee = if let Some(feerate) = self.holding_cell_update_fee {
- self.pending_update_fee = self.holding_cell_update_fee.take();
+ let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() {
+ assert!(self.is_outbound());
+ self.pending_update_fee = Some((feerate, FeeUpdateState::Outbound));
Some(msgs::UpdateFee {
channel_id: self.channel_id,
feerate_per_kw: feerate as u32,
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &F, logger: &L) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option<msgs::ClosingSigned>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError>
- where F::Target: FeeEstimator,
- L::Target: Logger,
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ where L::Target: Logger,
{
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
}
self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
- if self.is_outbound() {
- if let Some(feerate) = self.pending_update_fee.take() {
- self.feerate_per_kw = feerate;
- }
- } else {
- if let Some(feerate) = self.pending_update_fee {
- // Because a node cannot send two commitment_signeds in a row without getting a
- // revoke_and_ack from us (as it would otherwise not know the per_commitment_point
- // it should use to create keys with) and because a node can't send a
- // commitment_signed without changes, checking if the feerate is equal to the
- // pending feerate update is sufficient to detect require_commitment.
- if feerate == self.feerate_per_kw {
+ if let Some((feerate, update_state)) = self.pending_update_fee {
+ match update_state {
+ FeeUpdateState::Outbound => {
+ debug_assert!(self.is_outbound());
+ log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
+ self.feerate_per_kw = feerate;
+ self.pending_update_fee = None;
+ },
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); },
+ FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
+ debug_assert!(!self.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
require_commitment = true;
+ self.feerate_per_kw = feerate;
self.pending_update_fee = None;
- }
+ },
}
}
self.monitor_pending_forwards.append(&mut to_forward_infos);
self.monitor_pending_failures.append(&mut revoked_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
- return Ok((None, Vec::new(), Vec::new(), None, monitor_update, Vec::new()))
+ return Ok((None, Vec::new(), Vec::new(), monitor_update, Vec::new()))
}
match self.free_holding_cell_htlcs(logger)? {
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail))
+ Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail))
},
(None, htlcs_to_fail) => {
if require_commitment {
update_fail_malformed_htlcs,
update_fee: None,
commitment_signed
- }), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail))
+ }), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail))
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
- Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, htlcs_to_fail))
+ Ok((None, to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail))
}
}
}
-
}
/// Adds a pending update to this channel. See the doc for send_htlc for
panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
}
- if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
+ if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
self.holding_cell_update_fee = Some(feerate_per_kw);
return None;
}
debug_assert!(self.pending_update_fee.is_none());
- self.pending_update_fee = Some(feerate_per_kw);
+ self.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
Some(msgs::UpdateFee {
channel_id: self.channel_id,
// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
// will be retransmitted.
self.last_sent_closing_fee = None;
+ self.pending_counterparty_closing_signed = None;
+ self.closing_fee_limits = None;
let mut inbound_drop_count = 0;
self.pending_inbound_htlcs.retain(|htlc| {
});
self.next_counterparty_htlc_id -= inbound_drop_count;
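+ // An inbound fee update in `RemoteAnnounced` was never committed to, and the
+ // peer must retransmit its `update_fee` after reconnection if it still wants
+ // the new feerate, so we can simply forget it here.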
+ if let Some((_, update_state)) = self.pending_update_fee {
+ if update_state == FeeUpdateState::RemoteAnnounced {
+ debug_assert!(!self.is_outbound());
+ self.pending_update_fee = None;
+ }
+ }
+
for htlc in self.pending_outbound_htlcs.iter_mut() {
if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
// They sent us an update to remove this but haven't yet sent the corresponding
return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
}
Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
- self.pending_update_fee = Some(msg.feerate_per_kw);
+ let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate();
+
+ self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
self.update_time_counter += 1;
+ // If the feerate has increased over the previous dust buffer (note that
+ // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
+ // won't be pushed over our dust exposure limit by the feerate increase.
+ if feerate_over_dust_buffer {
+ let inbound_stats = self.get_inbound_pending_htlc_stats();
+ let outbound_stats = self.get_outbound_pending_htlc_stats();
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+ msg.feerate_per_kw, holder_tx_dust_exposure)));
+ }
+ if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+ msg.feerate_per_kw, counterparty_tx_dust_exposure)));
+ }
+ }
Ok(())
}
}
}
- log_trace!(logger, "Regenerated latest commitment update in channel {} with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
- log_bytes!(self.channel_id()), update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
+ let update_fee = if self.is_outbound() && self.pending_update_fee.is_some() {
+ Some(msgs::UpdateFee {
+ channel_id: self.channel_id(),
+ feerate_per_kw: self.pending_update_fee.unwrap().0,
+ })
+ } else { None };
+
+ log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+ log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
msgs::CommitmentUpdate {
- update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs,
- update_fee: None,
+ update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
}
}
self.channel_state &= !(ChannelState::PeerDisconnected as u32);
let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+ assert!(self.shutdown_scriptpubkey.is_some());
Some(msgs::Shutdown {
channel_id: self.channel_id,
scriptpubkey: self.get_closing_scriptpubkey(),
// now!
match self.free_holding_cell_htlcs(logger) {
Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
- Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
+ Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) =>
+ panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
Ok((Some((commitment_update, monitor_update)), htlcs_to_fail)) => {
return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), htlcs_to_fail, shutdown_msg));
},
}
}
- fn maybe_propose_first_closing_signed<F: Deref>(&mut self, fee_estimator: &F) -> Option<msgs::ClosingSigned>
+ /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
+ /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
+ /// at which point they will be recalculated.
+ fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &F) -> (u64, u64)
where F::Target: FeeEstimator
{
- if !self.is_outbound() || !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() ||
- self.channel_state & (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32) != BOTH_SIDES_SHUTDOWN_MASK ||
- self.last_sent_closing_fee.is_some() || self.pending_update_fee.is_some() {
- return None;
- }
+ if let Some((min, max)) = self.closing_fee_limits { return (min, max); }
+ // Propose a range from our current Background feerate to our Normal feerate plus our
+ // force_close_avoidance_max_fee_satoshis.
+ // If we fail to come to consensus, we'll have to force-close.
let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
- if self.feerate_per_kw > proposed_feerate {
- proposed_feerate = self.feerate_per_kw;
- }
+ let normal_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() };
+
+ // The spec requires that (when the channel does not have anchors) we only send absolute
+ // channel fees no greater than the absolute channel fee on the current commitment
+ // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
+ // a very good reason to apply such a limit in any case. We don't bother doing so, risking
+ // some force-closures by old nodes, but we wanted to close the channel anyway.
+
+ if let Some(target_feerate) = self.target_closing_feerate_sats_per_kw {
+ let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.feerate_per_kw, target_feerate) };
+ proposed_feerate = cmp::max(proposed_feerate, min_feerate);
+ proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
+ }
+
+ // Note that technically we could end up with a lower minimum fee if one side's balance is
+ // below our dust limit, causing the output to disappear. We don't bother handling this
+ // case, however, as it should only happen if a channel is closed before any (material)
+ // payments have been made on it. This may cause slight fee overpayment and/or failure to
+ // come to consensus with our counterparty on appropriate fees; however, it should be a
+ // relatively rare case. We can revisit this later, though note that in order to determine
+ // if the funder's output is dust we have to know the absolute fee we're going to use.
let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
+ let proposed_max_total_fee_satoshis = if self.is_outbound() {
+ // We always add force_close_avoidance_max_fee_satoshis to our normal
+ // feerate-calculated fee, but allow the max to be overridden if we're using a
+ // target feerate-calculated fee.
+ cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.force_close_avoidance_max_fee_satoshis,
+ proposed_max_feerate as u64 * tx_weight / 1000)
+ } else {
+ u64::max_value()
+ };
+
+ self.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
+ self.closing_fee_limits.clone().unwrap()
+ }
+
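// An illustrative sketch of the range computed above (hypothetical helper, not
// crate code), assuming a Background estimate of 253 sat/kWU, a Normal estimate
// of 2_000 sat/kWU, a closing-transaction weight of 672 WU, and a
// force_close_avoidance_max_fee_satoshis of 1_000 sat:
fn example_closing_fee_limits() -> (u64, u64) {
    let (background_feerate, normal_feerate): (u64, u64) = (253, 2_000);
    let tx_weight: u64 = 672; // assumed weight of the closing transaction
    let force_close_avoidance_max_fee_satoshis: u64 = 1_000; // assumed config value
    let min_fee = background_feerate * tx_weight / 1000; // 170 sat
    let max_fee = normal_feerate * tx_weight / 1000 + force_close_avoidance_max_fee_satoshis; // 2_344 sat
    (min_fee, max_fee) // the fundee instead uses u64::max_value() as its max
}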
+ /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
+ /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
+ /// this point if we're the funder we should send the initial closing_signed, and in any case
+ /// shutdown should complete within a reasonable timeframe.
+ fn closing_negotiation_ready(&self) -> bool {
+ self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() &&
+ self.channel_state &
+ (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
+ ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)
+ == BOTH_SIDES_SHUTDOWN_MASK &&
+ self.pending_update_fee.is_none()
+ }
+
+ /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
+ /// an Err if no progress is being made and the channel should be force-closed instead.
+ /// Should be called on a one-minute timer.
+ pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
+ if self.closing_negotiation_ready() {
+ if self.closing_signed_in_flight {
+ return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+ } else {
+ self.closing_signed_in_flight = true;
+ }
+ }
+ Ok(())
+ }
+
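// An illustrative sketch of the two-tick stall rule above (hypothetical type,
// not crate code): the first one-minute tick that finds negotiation ready arms
// a flag; a second tick that still finds it unfinished errors out.
struct NegotiationWatchdog { closing_signed_in_flight: bool }
impl NegotiationWatchdog {
    fn tick(&mut self, negotiation_ready: bool) -> Result<(), &'static str> {
        if negotiation_ready {
            if self.closing_signed_in_flight {
                return Err("closing_signed negotiation failed to finish within two timer ticks");
            }
            self.closing_signed_in_flight = true;
        }
        Ok(())
    }
}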
+ pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(&mut self, fee_estimator: &F, logger: &L)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
+ return Ok((None, None));
+ }
+
+ if !self.is_outbound() {
+ if let Some(msg) = &self.pending_counterparty_closing_signed.take() {
+ return self.closing_signed(fee_estimator, &msg);
+ }
+ return Ok((None, None));
+ }
+
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+
+ assert!(self.shutdown_scriptpubkey.is_some());
+ let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
+ log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
+ our_min_fee, our_max_fee, total_fee_satoshis);
- let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(proposed_total_fee_satoshis, false);
let sig = self.holder_signer
.sign_closing_transaction(&closing_tx, &self.secp_ctx)
- .ok();
- assert!(closing_tx.get_weight() as u64 <= tx_weight);
- if sig.is_none() { return None; }
+ .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
- self.last_sent_closing_fee = Some((proposed_feerate, total_fee_satoshis, sig.clone().unwrap()));
- Some(msgs::ClosingSigned {
+ self.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
+ Ok((Some(msgs::ClosingSigned {
channel_id: self.channel_id,
fee_satoshis: total_fee_satoshis,
- signature: sig.unwrap(),
- })
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), None))
}
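// Call-pattern note: for the funder, the first successful call here returns
// (Some(closing_signed), None) and later calls return nothing until the
// counterparty responds; for the fundee, a counterparty closing_signed queued
// during a monitor-update failure is replayed here and may complete immediately
// with a broadcastable Transaction.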
- pub fn shutdown<F: Deref>(&mut self, fee_estimator: &F, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
- where F::Target: FeeEstimator
+ pub fn shutdown<K: Deref>(
+ &mut self, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown
+ ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ where K::Target: KeysInterface<Signer = Signer>
{
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
}
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if is_unsupported_shutdown_script(&their_features, &msg.scriptpubkey) {
- return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
- }
+ let shutdown_scriptpubkey = match ShutdownScript::try_from((msg.scriptpubkey.clone(), their_features)) {
+ Ok(script) => script.into_inner(),
+ Err(_) => return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))),
+ };
if self.counterparty_shutdown_scriptpubkey.is_some() {
- if Some(&msg.scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() {
- return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
+ if Some(&shutdown_scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() {
+ return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", shutdown_scriptpubkey.to_bytes().to_hex())));
}
} else {
- self.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
+ self.counterparty_shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
}
+ // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
+ // immediately after the commitment dance, but we can send a Shutdown because we won't send
+ // any further commitment updates after we set LocalShutdownSent.
+ let send_shutdown = (self.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
+
+ let update_shutdown_script = match self.shutdown_scriptpubkey {
+ Some(_) => false,
+ None => {
+ assert!(send_shutdown);
+ let shutdown_scriptpubkey = keys_provider.get_shutdown_scriptpubkey();
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
+ self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
+ },
+ };
+
// From here on out, we may not fail!
self.channel_state |= ChannelState::RemoteShutdownSent as u32;
self.update_time_counter += 1;
+ let monitor_update = if update_shutdown_script {
+ self.latest_monitor_update_id += 1;
+ Some(ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ })
+ } else { None };
+ let shutdown = if send_shutdown {
+ Some(msgs::Shutdown {
+ channel_id: self.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ })
+ } else { None };
+
// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
// cell HTLCs and return them to fail the payment.
_ => true
}
});
- // If we have any LocalAnnounced updates we'll probably just get back a update_fail_htlc
- // immediately after the commitment dance, but we can send a Shutdown cause we won't send
- // any further commitment updates after we set LocalShutdownSent.
-
- let shutdown = if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
- None
- } else {
- Some(msgs::Shutdown {
- channel_id: self.channel_id,
- scriptpubkey: self.get_closing_scriptpubkey(),
- })
- };
self.channel_state |= ChannelState::LocalShutdownSent as u32;
self.update_time_counter += 1;
- Ok((shutdown, self.maybe_propose_first_closing_signed(fee_estimator), dropped_outbound_htlcs))
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
}
fn build_signed_closing_transaction(&self, tx: &mut Transaction, counterparty_sig: &Signature, sig: &Signature) {
return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
}
+ if self.is_outbound() && self.last_sent_closing_fee.is_none() {
+ return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+ }
+
+ if self.channel_state & ChannelState::MonitorUpdateFailed as u32 != 0 {
+ self.pending_counterparty_closing_signed = Some(msg.clone());
+ return Ok((None, None));
+ }
+
let funding_redeemscript = self.get_funding_redeemscript();
let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
if used_total_fee != msg.fee_satoshis {
- return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee greater than the value they can claim. Fee in message: {}", msg.fee_satoshis)));
+ return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
}
let mut sighash = hash_to_message!(&bip143::SigHashCache::new(&closing_tx).signature_hash(0, &funding_redeemscript, self.channel_value_satoshis, SigHashType::All)[..]);
},
};
- let closing_tx_max_weight = self.get_closing_transaction_weight(
- if let Some(oup) = closing_tx.output.get(0) { Some(&oup.script_pubkey) } else { None },
- if let Some(oup) = closing_tx.output.get(1) { Some(&oup.script_pubkey) } else { None });
- if let Some((_, last_fee, sig)) = self.last_sent_closing_fee {
+ assert!(self.shutdown_scriptpubkey.is_some());
+ if let Some((last_fee, sig)) = self.last_sent_closing_fee {
if last_fee == msg.fee_satoshis {
self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
- assert!(closing_tx.get_weight() as u64 <= closing_tx_max_weight);
- debug_assert!(closing_tx.get_weight() as u64 >= closing_tx_max_weight - 2);
self.channel_state = ChannelState::ShutdownComplete as u32;
self.update_time_counter += 1;
return Ok((None, Some(closing_tx)));
}
}
- macro_rules! propose_new_feerate {
- ($new_feerate: expr) => {
- let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
- let (closing_tx, used_total_fee) = self.build_closing_transaction($new_feerate as u64 * tx_weight / 1000, false);
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+
+ macro_rules! propose_fee {
+ ($new_fee: expr) => {
+ let (mut tx, used_fee) = if $new_fee == msg.fee_satoshis {
+ (closing_tx, $new_fee)
+ } else {
+ self.build_closing_transaction($new_fee, false)
+ };
+
let sig = self.holder_signer
- .sign_closing_transaction(&closing_tx, &self.secp_ctx)
+ .sign_closing_transaction(&tx, &self.secp_ctx)
.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
- assert!(closing_tx.get_weight() as u64 <= tx_weight);
- self.last_sent_closing_fee = Some(($new_feerate, used_total_fee, sig.clone()));
+
+ let signed_tx = if $new_fee == msg.fee_satoshis {
+ self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.update_time_counter += 1;
+ self.build_signed_closing_transaction(&mut tx, &msg.signature, &sig);
+ Some(tx)
+ } else { None };
+
+ self.last_sent_closing_fee = Some((used_fee, sig.clone()));
return Ok((Some(msgs::ClosingSigned {
channel_id: self.channel_id,
- fee_satoshis: used_total_fee,
+ fee_satoshis: used_fee,
signature: sig,
- }), None))
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), signed_tx))
}
}
- let mut min_feerate = 253;
- if self.is_outbound() {
- let max_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
- if (msg.fee_satoshis as u64) > max_feerate as u64 * closing_tx_max_weight / 1000 {
- if let Some((last_feerate, _, _)) = self.last_sent_closing_fee {
- if max_feerate <= last_feerate {
- return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something higher ({}) than our Normal feerate ({})", last_feerate, max_feerate)));
- }
+ if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
+ if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
+ }
+ if max_fee_satoshis < our_min_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
+ }
+ if min_fee_satoshis > our_max_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
+ }
+
+ if !self.is_outbound() {
+ // They have to pay, so pick the highest fee in the overlapping range.
+ debug_assert_eq!(our_max_fee, u64::max_value()); // We should never set an upper bound
+ propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
+ } else {
+ if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
+ msg.fee_satoshis, our_min_fee, our_max_fee)));
}
- propose_new_feerate!(max_feerate);
+ // The proposed fee is in our acceptable range, accept it and broadcast!
+ propose_fee!(msg.fee_satoshis);
}
} else {
- min_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
- }
- if (msg.fee_satoshis as u64) < min_feerate as u64 * closing_tx_max_weight / 1000 {
- if let Some((last_feerate, _, _)) = self.last_sent_closing_fee {
- if min_feerate >= last_feerate {
- return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something lower ({}) than our Background feerate ({}).", last_feerate, min_feerate)));
+ // Old fee style negotiation. We don't bother to enforce whether they are complying
+ // with the "making progress" requirements; we just comply and hope for the best.
+ if let Some((last_fee, _)) = self.last_sent_closing_fee {
+ if msg.fee_satoshis > last_fee {
+ if msg.fee_satoshis < our_max_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee < our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
+ }
+ } else {
+ if msg.fee_satoshis > our_min_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee > our_min_fee {
+ propose_fee!(our_min_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
+ }
+ }
+ } else {
+ if msg.fee_satoshis < our_min_fee {
+ propose_fee!(our_min_fee);
+ } else if msg.fee_satoshis > our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ propose_fee!(msg.fee_satoshis);
}
}
- propose_new_feerate!(min_feerate);
}
-
- let sig = self.holder_signer
- .sign_closing_transaction(&closing_tx, &self.secp_ctx)
- .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
- self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
- assert!(closing_tx.get_weight() as u64 <= closing_tx_max_weight);
- debug_assert!(closing_tx.get_weight() as u64 >= closing_tx_max_weight - 2);
-
- self.channel_state = ChannelState::ShutdownComplete as u32;
- self.update_time_counter += 1;
-
- Ok((Some(msgs::ClosingSigned {
- channel_id: self.channel_id,
- fee_satoshis: msg.fee_satoshis,
- signature: sig,
- }), Some(closing_tx)))
}
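// An illustrative sketch of the range-overlap rule above (hypothetical helper,
// not crate code): a proposal outside the sender's own range is bogus, disjoint
// ranges merely warn (negotiation restarts on reconnect), and otherwise the
// non-funder takes the top of the overlap since the funder pays.
fn example_pick_closing_fee(
    (our_min, our_max): (u64, u64), (their_min, their_max): (u64, u64), their_proposal: u64,
) -> Result<u64, &'static str> {
    if their_proposal < their_min || their_proposal > their_max {
        return Err("bogus closing_signed: proposal outside sender's own range");
    }
    if their_max < our_min || their_min > our_max {
        return Err("no overlap: warn and wait");
    }
    // As the non-funder, our_max is effectively unbounded, so this picks their_max.
    Ok(core::cmp::min(their_max, our_max))
}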
// Public utilities:
cmp::max(self.config.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
}
- #[cfg(test)]
+ pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
+ self.config.max_dust_htlc_exposure_msat
+ }
+
pub fn get_feerate(&self) -> u32 {
self.feerate_per_kw
}
+ pub fn get_dust_buffer_feerate(&self) -> u32 {
+ // When calculating our exposure to dust HTLCs, we assume that the channel feerate
+ // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
+ // whichever is higher. This ensures that we aren't suddenly exposed to significantly
+ // more dust balance if the feerate increases when we have several HTLCs pending
+ // which are near the dust limit.
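+ // Worked numbers: at 5_000 sat/kWU the 25% bump wins and the buffer is
+ // max(2530, 5_000 * 1250 / 1000) = 6_250 sat/kWU; at 1_000 sat/kWU the
+ // 2_530 sat/kWU floor applies instead.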
+ let mut feerate_per_kw = self.feerate_per_kw;
+ if let Some((feerate, _)) = self.pending_update_fee {
+ feerate_per_kw = cmp::max(feerate_per_kw, feerate);
+ }
+ cmp::max(2530, feerate_per_kw * 1250 / 1000)
+ }
+
pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
self.cur_holder_commitment_transaction_number + 1
}
self.channel_state >= ChannelState::FundingSent as u32
}
+ /// Returns true if our peer has either initiated or agreed to shut down the channel.
+ pub fn received_shutdown(&self) -> bool {
+ (self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
+ }
+
+ /// Returns true if we either initiated or agreed to shut down the channel.
+ pub fn sent_shutdown(&self) -> bool {
+ (self.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+ }
+
/// Returns true if this channel is fully shut down. True here implies that no further actions
/// may/will be taken on this channel, and thus this object should be freed. Any future changes
/// will be handled appropriately by the chain monitor.
htlc_basepoint: keys.htlc_basepoint,
first_per_commitment_point,
channel_flags: if self.config.announced_channel {1} else {0},
- shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
+ shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
}
}
delayed_payment_basepoint: keys.delayed_payment_basepoint,
htlc_basepoint: keys.htlc_basepoint,
first_per_commitment_point,
- shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
+ shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
}
}
/// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
let counterparty_keys = self.build_remote_transaction_keys()?;
- let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0;
+ let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0;
Ok(self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx)
.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
}
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
}
- let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats();
- if outbound_htlc_count + 1 > self.counterparty_max_accepted_htlcs as u32 {
+ let inbound_stats = self.get_inbound_pending_htlc_stats();
+ let outbound_stats = self.get_outbound_pending_htlc_stats();
+ if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 {
return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.counterparty_max_accepted_htlcs)));
}
// Check their_max_htlc_value_in_flight_msat
- if htlc_outbound_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat {
+ if outbound_stats.pending_htlcs_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat {
return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat)));
}
}
}
- let pending_value_to_self_msat = self.value_to_self_msat - htlc_outbound_value_msat;
+ let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis;
+ if amount_msat / 1000 < exposure_dust_limit_success_sats {
+ let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + amount_msat;
+ if on_counterparty_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+ on_counterparty_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
+ }
+ }
+
+ let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis;
+ if amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+ let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + amount_msat;
+ if on_holder_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+ on_holder_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
+ }
+ }
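+ // Worked example of the two thresholds above, assuming the pre-anchors weights
+ // (HTLC_SUCCESS_TX_WEIGHT = 703, HTLC_TIMEOUT_TX_WEIGHT = 663), a dust buffer
+ // feerate of 6_250 sat/kWU and a holder dust limit of 546 sat: any HTLC below
+ // 6_250 * 663 / 1000 + 546 = 4_689 sat would count towards our on-holder-tx
+ // dust exposure, and is refused if it would push that exposure over
+ // max_dust_htlc_exposure_msat.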
+
+ let pending_value_to_self_msat = self.value_to_self_msat - outbound_stats.pending_htlcs_value_msat;
if pending_value_to_self_msat < amount_msat {
return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, pending_value_to_self_msat)));
}
- // `2 *` and extra HTLC are for the fee spike buffer.
+ // `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE *` and an extra HTLC are for the fee spike buffer.
let commit_tx_fee_msat = if self.is_outbound() {
let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
- 2 * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
+ FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
} else { 0 };
if pending_value_to_self_msat - amount_msat < commit_tx_fee_msat {
return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", pending_value_to_self_msat, commit_tx_fee_msat)));
if (self.channel_state & (ChannelState::MonitorUpdateFailed as u32)) == (ChannelState::MonitorUpdateFailed as u32) {
panic!("Cannot create commitment tx while awaiting monitor update unfreeze, as send_htlc will have returned an Err so a send_commitment precondition has been violated");
}
- let mut have_updates = self.pending_update_fee.is_some();
+ let mut have_updates = self.is_outbound() && self.pending_update_fee.is_some();
for htlc in self.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
have_updates = true;
}
/// Only fails in case of bad keys
fn send_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger {
+ log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
// fail to generate this, we still are at least at a position where upgrading their status
// is acceptable.
Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
} else { None };
if let Some(state) = new_state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
htlc.state = state;
}
}
if let Some(fail_reason) = if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut fail_reason) = &mut htlc.state {
Some(fail_reason.take())
} else { None } {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason);
}
}
+ if let Some((feerate, update_state)) = self.pending_update_fee {
+ if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
+ debug_assert!(!self.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ self.feerate_per_kw = feerate;
+ self.pending_update_fee = None;
+ }
+ }
self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
let (res, counterparty_commitment_txid, htlcs) = match self.send_commitment_no_state_update(logger) {
/// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
/// when we shouldn't change HTLC/channel state.
fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
- let mut feerate_per_kw = self.feerate_per_kw;
- if let Some(feerate) = self.pending_update_fee {
- if self.is_outbound() {
- feerate_per_kw = feerate;
- }
- }
-
let counterparty_keys = self.build_remote_transaction_keys()?;
- let counterparty_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, feerate_per_kw, logger);
+ let counterparty_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let feerate_per_kw = counterparty_commitment_tx.1;
let counterparty_commitment_txid = counterparty_commitment_tx.0.trust().txid();
let (signature, htlc_signatures);
&& info.next_holder_htlc_id == self.next_holder_htlc_id
&& info.next_counterparty_htlc_id == self.next_counterparty_htlc_id
&& info.feerate == self.feerate_per_kw {
- let actual_fee = self.commit_tx_fee_msat(counterparty_commitment_tx.1);
+ let actual_fee = self.commit_tx_fee_msat(counterparty_commitment_tx.2);
assert_eq!(actual_fee, info.fee);
}
}
}
{
- let mut htlcs = Vec::with_capacity(counterparty_commitment_tx.2.len());
- for &(ref htlc, _) in counterparty_commitment_tx.2.iter() {
+ let mut htlcs = Vec::with_capacity(counterparty_commitment_tx.3.len());
+ for &(ref htlc, _) in counterparty_commitment_tx.3.iter() {
htlcs.push(htlc);
}
channel_id: self.channel_id,
signature,
htlc_signatures,
- }, (counterparty_commitment_txid, counterparty_commitment_tx.2)))
+ }, (counterparty_commitment_txid, counterparty_commitment_tx.3)))
}
/// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction
- /// Begins the shutdown process, getting a message for the remote peer and returning all
- /// holding cell HTLCs for payment failure.
+ /// Begins the shutdown process, getting a message for the remote peer, an optional
+ /// monitor update committing our shutdown script, and all holding cell HTLCs to be
+ /// failed back.
- pub fn get_shutdown(&mut self) -> Result<(msgs::Shutdown, Vec<(HTLCSource, PaymentHash)>), APIError> {
+ pub fn get_shutdown<K: Deref>(&mut self, keys_provider: &K, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
+ -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+ where K::Target: KeysInterface<Signer = Signer> {
for htlc in self.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
}
- let closing_script = self.get_closing_scriptpubkey();
+ let update_shutdown_script = match self.shutdown_scriptpubkey {
+ Some(_) => false,
+ None => {
+ let shutdown_scriptpubkey = keys_provider.get_shutdown_scriptpubkey();
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ }
+ self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
+ },
+ };
// From here on out, we may not fail!
+ self.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
if self.channel_state < ChannelState::FundingSent as u32 {
self.channel_state = ChannelState::ShutdownComplete as u32;
} else {
}
self.update_time_counter += 1;
+ let monitor_update = if update_shutdown_script {
+ self.latest_monitor_update_id += 1;
+ Some(ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ })
+ } else { None };
+ let shutdown = msgs::Shutdown {
+ channel_id: self.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ };
+
// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
// our shutdown until we've committed all of the pending changes.
self.holding_cell_update_fee = None;
}
});
- Ok((msgs::Shutdown {
- channel_id: self.channel_id,
- scriptpubkey: closing_script,
- }, dropped_outbound_htlcs))
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
}
/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
}
}
-fn is_unsupported_shutdown_script(their_features: &InitFeatures, script: &Script) -> bool {
- // We restrain shutdown scripts to standards forms to avoid transactions not propagating on the p2p tx-relay network
-
- // BOLT 2 says we must only send a scriptpubkey of certain standard forms,
- // which for a a BIP-141-compliant witness program is at max 42 bytes in length.
- // So don't let the remote peer feed us some super fee-heavy script.
- let is_script_too_long = script.len() > 42;
- if is_script_too_long {
- return true;
- }
-
- if their_features.supports_shutdown_anysegwit() && script.is_witness_program() && script.as_bytes()[0] != OP_PUSHBYTES_0.into_u8() {
- return false;
- }
-
- return !script.is_p2pkh() && !script.is_p2sh() && !script.is_v0_p2wpkh() && !script.is_v0_p2wsh()
-}
-
const SERIALIZATION_VERSION: u8 = 2;
const MIN_SERIALIZATION_VERSION: u8 = 1;
(key_data.0.len() as u32).write(writer)?;
writer.write_all(&key_data.0[..])?;
- self.shutdown_pubkey.write(writer)?;
+ // Write out the old serialization of the shutdown_pubkey for backwards compatibility,
+ // if the script was derived from that legacy format.
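+ // Round-trip sketch: this wire position always holds 33 bytes, so a script
+ // that was not derived from a legacy pubkey is written as all-zeros (which
+ // the read side treats as "no legacy key") and the real script travels in
+ // the odd TLV field (7) below, which readers are free to ignore.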
+ match self.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
+ Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
+ None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
+ }
self.destination_script.write(writer)?;
self.cur_holder_commitment_transaction_number.write(writer)?;
fail_reason.write(writer)?;
}
- self.pending_update_fee.write(writer)?;
+ if self.is_outbound() {
+ self.pending_update_fee.map(|(a, _)| a).write(writer)?;
+ } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.pending_update_fee {
+ // As for inbound HTLCs, if the update was only announced and never committed, drop it.
+ Some(feerate).write(writer)?;
+ } else {
+ None::<u32>.write(writer)?;
+ }
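+ // Readers thus still see an `Option<u32>` in this position exactly as before;
+ // the `FeeUpdateState` half of the tuple is reconstructed on read based on
+ // whether the channel is outbound.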
self.holding_cell_update_fee.write(writer)?;
self.next_holder_htlc_id.write(writer)?;
self.update_time_counter.write(writer)?;
self.feerate_per_kw.write(writer)?;
- match self.last_sent_closing_fee {
- Some((feerate, fee, sig)) => {
- 1u8.write(writer)?;
- feerate.write(writer)?;
- fee.write(writer)?;
- sig.write(writer)?;
- },
- None => 0u8.write(writer)?,
- }
+ // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here;
+ // however, we are supposed to restart shutdown fee negotiation on reconnect (and wipe
+ // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
+ // consider the stale state on reload.
+ 0u8.write(writer)?;
self.funding_tx_confirmed_in.write(writer)?;
self.funding_tx_confirmation_height.write(writer)?;
(1, self.minimum_depth, option),
(3, self.counterparty_selected_channel_reserve_satoshis, option),
(5, self.config, required),
+ (7, self.shutdown_scriptpubkey, option),
+ (9, self.target_closing_feerate_sats_per_kw, option),
});
Ok(())
}
let holder_signer = keys_source.read_chan_signer(&keys_data)?;
- let shutdown_pubkey = Readable::read(reader)?;
+ // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
+ let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
+ Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
+ Err(_) => None,
+ };
let destination_script = Readable::read(reader)?;
let cur_holder_commitment_transaction_number = Readable::read(reader)?;
monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
}
- let pending_update_fee = Readable::read(reader)?;
+ let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
+
let holding_cell_update_fee = Readable::read(reader)?;
let next_holder_htlc_id = Readable::read(reader)?;
let update_time_counter = Readable::read(reader)?;
let feerate_per_kw = Readable::read(reader)?;
- let last_sent_closing_fee = match <u8 as Readable>::read(reader)? {
- 0 => None,
- 1 => Some((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)),
+ // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here;
+ // however, we are supposed to restart shutdown fee negotiation on reconnect (and wipe
+ // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
+ // consider the stale state on reload.
+ match <u8 as Readable>::read(reader)? {
+ 0 => {},
+ 1 => {
+ let _: u32 = Readable::read(reader)?;
+ let _: u64 = Readable::read(reader)?;
+ let _: Signature = Readable::read(reader)?;
+ },
_ => return Err(DecodeError::InvalidValue),
- };
+ }
let funding_tx_confirmed_in = Readable::read(reader)?;
let funding_tx_confirmation_height = Readable::read(reader)?;
_ => return Err(DecodeError::InvalidValue),
};
- let channel_parameters = Readable::read(reader)?;
+ let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
let funding_transaction = Readable::read(reader)?;
let counterparty_cur_commitment_point = Readable::read(reader)?;
}
}
+ let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
+ Some((feerate, if channel_parameters.is_outbound_from_holder {
+ FeeUpdateState::Outbound
+ } else {
+ FeeUpdateState::AwaitingRemoteRevokeToAnnounce
+ }))
+ } else {
+ None
+ };
+
let mut announcement_sigs = None;
+ let mut target_closing_feerate_sats_per_kw = None;
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(3, counterparty_selected_channel_reserve_satoshis, option),
(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
+ (7, shutdown_scriptpubkey, option),
+ (9, target_closing_feerate_sats_per_kw, option),
});
let mut secp_ctx = Secp256k1::new();
latest_monitor_update_id,
holder_signer,
- shutdown_pubkey,
+ shutdown_scriptpubkey,
destination_script,
cur_holder_commitment_transaction_number,
#[cfg(debug_assertions)]
counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
- last_sent_closing_fee,
+ last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw,
funding_tx_confirmed_in,
funding_tx_confirmation_height,
commitment_secrets,
channel_update_status,
+ closing_signed_in_flight: false,
announcement_sigs,
use ln::channel::MAX_FUNDING_SATOSHIS;
use ln::features::InitFeatures;
use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
+ use ln::script::ShutdownScript;
use ln::chan_utils;
use ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT};
use chain::BestBlock;
use chain::transaction::OutPoint;
use util::config::UserConfig;
use util::enforcing_trait_impls::EnforcingSigner;
+ use util::errors::APIError;
use util::test_utils;
+ use util::test_utils::OnGetShutdownScriptpubkey;
use util::logger::Logger;
use bitcoin::secp256k1::{Secp256k1, Message, Signature, All};
use bitcoin::secp256k1::ffi::Signature as FFISignature;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::hash_types::{Txid, WPubkeyHash};
+ use core::num::NonZeroU8;
use sync::Arc;
use prelude::*;
Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
}
- fn get_shutdown_pubkey(&self) -> PublicKey {
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
let secp_ctx = Secp256k1::signing_only();
let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
- PublicKey::from_secret_key(&secp_ctx, &channel_close_key)
+ ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))
}
fn get_channel_signer(&self, _inbound: bool, _channel_value_satoshis: u64) -> InMemorySigner {
PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
}
+ #[test]
+ fn upfront_shutdown_script_incompatibility() {
+ let features = InitFeatures::known().clear_shutdown_anysegwit();
+ let non_v0_segwit_shutdown_script =
+ ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
+
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+ keys_provider.expect(OnGetShutdownScriptpubkey {
+ returns: non_v0_segwit_shutdown_script.clone(),
+ });
+
+ let fee_estimator = TestFeeEstimator { fee_est: 253 };
+ let secp_ctx = Secp256k1::new();
+ let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let config = UserConfig::default();
+ match Channel::<EnforcingSigner>::new_outbound(&&fee_estimator, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config) {
+ Err(APIError::IncompatibleShutdownScript { script }) => {
+ assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
+ },
+ Err(e) => panic!("Unexpected error: {:?}", e),
+ Ok(_) => panic!("Expected error"),
+ }
+ }
+
// Check that, during channel creation, we use the same feerate in the open channel message
// as we do in the Channel object creation itself.
#[test]
let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, 10000000, 100000, 42, &config).unwrap();
+ let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
+ let node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.get_accept_channel();
accept_channel_msg.dust_limit_satoshis = 546;
- node_a_chan.accept_channel(&accept_channel_msg, &config, InitFeatures::known()).unwrap();
+ node_a_chan.accept_channel(&accept_channel_msg, &config, &InitFeatures::known()).unwrap();
node_a_chan.holder_dust_limit_satoshis = 1560;
// Put some inbound and outbound HTLCs in A's channel.
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_id, 10000000, 100000, 42, &config).unwrap();
+ let mut chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
let commitment_tx_fee_0_htlcs = chan.commit_tx_fee_msat(0);
let commitment_tx_fee_1_htlc = chan.commit_tx_fee_msat(1);
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
+ let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
// Node B --> Node A: accept channel
let accept_channel_msg = node_b_chan.get_accept_channel();
- node_a_chan.accept_channel(&accept_channel_msg, &config, InitFeatures::known()).unwrap();
+ node_a_chan.accept_channel(&accept_channel_msg, &config, &InitFeatures::known()).unwrap();
// Node A --> Node B: funding created
let output_script = node_a_chan.get_funding_redeemscript();
// Create a channel.
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
assert!(node_a_chan.counterparty_forwarding_info.is_none());
assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1); // the default
assert!(node_a_chan.counterparty_forwarding_info().is_none());
let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut config = UserConfig::default();
config.channel_options.announced_channel = false;
- let mut chan = Channel::<InMemorySigner>::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, 10_000_000, 100000, 42, &config).unwrap(); // Nothing uses their network key in this test
+ let mut chan = Channel::<InMemorySigner>::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, &InitFeatures::known(), 10_000_000, 100000, 42, &config).unwrap(); // Nothing uses their network key in this test
chan.holder_dust_limit_satoshis = 546;
chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
} ) => { {
let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
- let mut res = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, chan.feerate_per_kw, &logger);
+ let mut res = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
- let htlcs = res.2.drain(..)
+ let htlcs = res.3.drain(..)
.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
.collect();
(res.0, htlcs)
use chain;
use chain::{Confirm, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
#[cfg(any(test, feature = "allow_wallclock_use"))]
use std::time::Instant;
use core::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
Self {
err: match err {
+ ChannelError::Warn(msg) => LightningError {
+ err: msg,
+ action: msgs::ErrorAction::IgnoreError,
+ },
ChannelError::Ignore(msg) => LightningError {
err: msg,
action: msgs::ErrorAction::IgnoreError,
/// Because adding or removing an entry is rare, we usually take an outer read lock and then
/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
/// new channel.
+ ///
+ /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
pending_events: Mutex<Vec<events::Event>>,
macro_rules! convert_chan_err {
($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
match $err {
+ ChannelError::Warn(msg) => {
+ //TODO: Once warning messages are merged, we should send a `warning` message to our
+ //peer here.
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+ },
ChannelError::Ignore(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
},
}
}
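+// Removes a channel from `by_id` via the passed `Entry` while also dropping its
+// `short_to_id` alias, so callers cannot forget the alias cleanup. Usage:
+// `let chan = remove_channel!(channel_state, chan_entry);`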
+macro_rules! remove_channel {
+ ($channel_state: expr, $entry: expr) => {
+ {
+ let channel = $entry.remove_entry().1;
+ if let Some(short_id) = channel.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
+ }
+ channel
+ }
+ }
+}
+
macro_rules! handle_monitor_err {
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
- let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
- let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
+ let channel = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ match per_peer_state.get(&their_network_key) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
+ Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
+ }
+ };
let res = channel.get_open_channel(self.genesis_hash.clone());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
- /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
- /// will be accepted on the given channel, and after additional timeout/the closing of all
- /// pending HTLCs, the channel will be closed on chain.
- ///
- /// May generate a SendShutdown message event on success, which should be relayed.
- pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let (mut failed_htlcs, chan_option) = {
+ let counterparty_node_id;
+ let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
+ counterparty_node_id = chan_entry.get().get_counterparty_node_id();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ };
+ failed_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ let (result, is_permanent) =
+ handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+ if is_permanent {
+ remove_channel!(channel_state, chan_entry);
+ break result;
+ }
+ }
+ }
+
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: chan_entry.get().get_counterparty_node_id(),
+ node_id: counterparty_node_id,
msg: shutdown_msg
});
+
if chan_entry.get().is_shutdown() {
- if let Some(short_id) = chan_entry.get().get_short_channel_id() {
- channel_state.short_to_id.remove(&short_id);
+ let channel = remove_channel!(channel_state, chan_entry);
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
}
- (failed_htlcs, Some(chan_entry.remove_entry().1))
- } else { (failed_htlcs, None) }
+ }
+ break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
}
};
+
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- let chan_update = if let Some(chan) = chan_option {
- self.get_channel_update_for_broadcast(&chan).ok()
- } else { None };
-
- if let Some(update) = chan_update {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
+ let _ = handle_error!(self, result, counterparty_node_id);
Ok(())
}
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// * If we are the channel initiator, we will pay a closing-transaction fee between one
+ /// computed from our [`Background`] feerate estimate and one computed from our [`Normal`]
+ /// feerate estimate plus [`ChannelConfig::force_close_avoidance_max_fee_satoshis`].
+ /// * If our counterparty is the channel initiator, we will require a channel closing
+ /// transaction feerate of at least our [`Background`] feerate or the feerate which
+ /// would appear on a force-closure transaction, whichever is lower. We will allow our
+ /// counterparty to pay as much fee as they'd like, however.
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, None)
+ }
+
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// `target_feerate_sats_per_1000_weight` has different meanings depending on whether we
+ /// initiated the channel being closed:
+ /// * If we are the channel initiator, we will pay at least this feerate on the closing
+ /// transaction. The upper-bound is set by
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+ /// estimate (or `target_feerate_sats_per_1000_weight`, if it is greater).
+ /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure
+ /// transaction feerate below `target_feerate_sats_per_1000_weight` (or the feerate which
+ /// will appear on a force-closure transaction, whichever is lower).
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+ }
+
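The two entry points above differ only in the optional feerate floor threaded through `close_channel_internal`. As a minimal caller-side sketch, assume `channel_manager` is an initialized `ChannelManager` and `channel_id` identifies an open channel whose peer is currently connected (both names are placeholders, not defined here):

```rust
// Sketch only; errors are propagated as APIError via `?`.

// Begin a cooperative close within the default feerate bounds documented above.
channel_manager.close_channel(&channel_id)?;

// Alternatively, require at least 1_000 sats per 1000 weight units on the closing
// transaction; the upper bound is still capped as documented above.
channel_manager.close_channel_with_target_feerate(&channel_id, 1_000)?;
```

Either call may queue a `SendShutdown` message event, so the message-event loop (e.g. `PeerManager::process_events`) should run afterwards to relay it.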
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
- let (next_hop_data, next_hop_hmac) = {
- match msgs::OnionHopData::read(&mut chacha_stream) {
+ let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = {
+ match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
Err(err) => {
let error_code = match err {
msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
// for now more than 10 paths likely carries too much one-path failure.
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
}
+ if payment_secret.is_none() && route.paths.len() > 1 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
+ }
let mut total_value = 0;
let our_node_id = self.get_our_node_id();
let mut path_errs = Vec::with_capacity(route.paths.len());
/// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
/// never reach the recipient.
///
+ /// See [`send_payment`] documentation for more details on the return value of this function.
+ ///
/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
/// [`send_payment`] for more information about the risks of duplicate preimage usage.
///
+ /// Note that `route` must have exactly one path.
+ ///
/// [`send_payment`]: Self::send_payment
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> {
let preimage = match payment_preimage {
// close channel and then send error message to peer.
let counterparty_node_id = chan.get().get_counterparty_node_id();
let err: Result<(), _> = match e {
- ChannelError::Ignore(_) => {
+ ChannelError::Ignore(_) | ChannelError::Warn(_) => {
panic!("Stated return value requirements in send_commitment() were not met");
- },
+ }
ChannelError::Close(msg) => {
log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
let (channel_id, mut channel) = chan.remove_entry();
self.process_background_events();
}
- /// If a peer is disconnected we mark any channels with that peer as 'disabled'.
- /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
- /// to inform the network about the uselessness of these channels.
+ fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+ if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
+ // If the feerate hasn't increased and hasn't at least halved, don't bother sending an update.
+ if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ return (true, NotifyOption::SkipPersist, Ok(()));
+ }
+ if !chan.is_live() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ return (true, NotifyOption::SkipPersist, Ok(()));
+ }
+ log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+
+ let mut retain_channel = true;
+ let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
+ Ok(res) => Ok(res),
+ Err(e) => {
+ let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ if drop { retain_channel = false; }
+ Err(res)
+ }
+ };
+ let ret_err = match res {
+ Ok(Some((update_fee, commitment_signed, monitor_update))) => {
+ if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id);
+ if drop { retain_channel = false; }
+ res
+ } else {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: Some(update_fee),
+ commitment_signed,
+ },
+ });
+ Ok(())
+ }
+ },
+ Ok(None) => Ok(()),
+ Err(e) => Err(e),
+ };
+ (retain_channel, NotifyOption::DoPersist, ret_err)
+ }
+
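The early return in `update_channel_fee` above encodes the update policy: feerate increases always qualify for an `update_fee`, while decreases are only signaled once the new estimate is at most half the current feerate. A standalone illustration of that predicate (the helper name is ours, purely for exposition):

```rust
/// True when `update_channel_fee` above would skip the update: the new estimate
/// hasn't increased, but also hasn't dropped to half the current feerate or below.
fn feerate_change_too_small(current: u32, new: u32) -> bool {
    new <= current && new * 2 > current
}

#[test]
fn feerate_policy_examples() {
    assert!(feerate_change_too_small(1000, 1000));  // unchanged: skip
    assert!(feerate_change_too_small(1000, 600));   // decreased, but not halved: skip
    assert!(!feerate_change_too_small(1000, 500));  // halved: send update_fee
    assert!(!feerate_change_too_small(1000, 1200)); // increased: send update_fee
}
```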
+ #[cfg(fuzzing)]
+ /// In chanmon_consistency we want to occasionally perform the channel fee updates done in
+ /// timer_tick_occurred, but we can't generate the disabled-channel updates there, as the fuzzer
+ /// considers those a failure (they usually indicate a channel force-close, which is exactly
+ /// what it is trying to detect). Thus, we expose this variant for its benefit.
+ pub fn maybe_update_chan_fees(&self) {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut should_persist = NotifyOption::SkipPersist;
+
+ let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+ let mut handle_errors = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ let short_to_id = &mut channel_state.short_to_id;
+ channel_state.by_id.retain(|chan_id, chan| {
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+ if err.is_err() {
+ handle_errors.push(err);
+ }
+ retain_channel
+ });
+ }
+
+ should_persist
+ });
+ }
+
+ /// Performs actions which should happen on startup and roughly once per minute thereafter.
///
- /// This method handles all the details, and must be called roughly once per minute.
+ /// This currently includes:
+ /// * Updating the feerate on our outbound channels to track current on-chain estimates,
+ /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+ /// than a minute, informing the network that it should no longer attempt to route over
+ /// the channel.
///
- /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
+ /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+ /// estimate fetches.
pub fn timer_tick_occurred(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = NotifyOption::SkipPersist;
if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- for (_, chan) in channel_state.by_id.iter_mut() {
- match chan.channel_update_status() {
- ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
- ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
- ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
- ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
- ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
- },
- ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
- },
- _ => {},
- }
+ let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+ let mut handle_errors = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ let short_to_id = &mut channel_state.short_to_id;
+ channel_state.by_id.retain(|chan_id, chan| {
+ let counterparty_node_id = chan.get_counterparty_node_id();
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+ if err.is_err() {
+ handle_errors.push((err, counterparty_node_id));
+ }
+ if !retain_channel { return false; }
+
+ if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+ let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ handle_errors.push((Err(err), chan.get_counterparty_node_id()));
+ if needs_close { return false; }
+ }
+
+ match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+ ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+ ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ },
+ ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ },
+ _ => {},
+ }
+
+ true
+ });
}
+ for (err, counterparty_node_id) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
+ }
should_persist
});
}
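As documented above, `timer_tick_occurred` should now run once at startup and then roughly once per minute thereafter. A minimal sketch of a driver loop, assuming `channel_manager` is a long-lived, `Arc`-shared `ChannelManager`; applications using `lightning_background_processor::BackgroundProcessor` typically get this scheduling without writing it themselves:

```rust
use std::thread;
use std::time::Duration;

// Sketch only: `channel_manager` is assumed to exist in the surrounding application.
channel_manager.timer_tick_occurred(); // once on startup
loop {
    thread::sleep(Duration::from_secs(60)); // then roughly once per minute
    channel_manager.timer_tick_occurred();
}
```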
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
- let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
+ let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration)
.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan);
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
}
fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
- let (mut dropped_htlcs, chan_option) = {
+ let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
+
+ if !chan_entry.get().received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
+
+ let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+ dropped_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ let (result, is_permanent) =
+ handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+ if is_permanent {
+ remove_channel!(channel_state, chan_entry);
+ break result;
+ }
+ }
+ }
+
if let Some(msg) = shutdown {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
+ node_id: *counterparty_node_id,
msg,
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- if chan_entry.get().is_shutdown() {
- if let Some(short_id) = chan_entry.get().get_short_channel_id() {
- channel_state.short_to_id.remove(&short_id);
- }
- (dropped_htlcs, Some(chan_entry.remove_entry().1))
- } else { (dropped_htlcs, None) }
+
+ break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
for htlc_source in dropped_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- if let Some(chan) = chan_option {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- }
+
+ let _ = handle_error!(self, result, *counterparty_node_id);
Ok(())
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
- match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
+ let (revoke_and_ack, commitment_signed, monitor_update) =
+ match chan.get_mut().commitment_signed(&msg, &self.logger) {
Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
Err((Some(update), e)) => {
assert!(chan.get().is_awaiting_monitor_update());
};
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
- //TODO: Rebroadcast closing_signed if present on monitor update restoration
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
node_id: counterparty_node_id.clone(),
},
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
- let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
- break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+ let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) =
+ break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
htlcs_to_fail = htlcs_to_fail_in;
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
if was_frozen_for_monitor {
- assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+ assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
} else {
if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
updates,
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
Ok(())
}
- /// Begin Update fee process. Allowed only on an outbound channel.
- /// If successful, will generate a UpdateHTLCs event, so you should probably poll
- /// PeerManager::process_events afterwards.
- /// Note: This API is likely to change!
- /// (C-not exported) Cause its doc(hidden) anyway
- #[doc(hidden)]
- pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let counterparty_node_id;
- let err: Result<(), _> = loop {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
-
- match channel_state.by_id.entry(channel_id) {
- hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}),
- hash_map::Entry::Occupied(mut chan) => {
- if !chan.get().is_outbound() {
- return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()});
- }
- if chan.get().is_awaiting_monitor_update() {
- return Err(APIError::MonitorUpdateFailed);
- }
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()});
- }
- counterparty_node_id = chan.get().get_counterparty_node_id();
- if let Some((update_fee, commitment_signed, monitor_update)) =
- break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan)
- {
- if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- unimplemented!();
- }
- log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: Some(update_fee),
- commitment_signed,
- },
- });
- }
- },
- }
- return Ok(())
- };
-
- match handle_error!(self, err, counterparty_node_id) {
- Ok(_) => unreachable!(),
- Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
- }
- }
-
/// Process pending events from the `chain::Watch`, returning whether any events were processed.
fn process_pending_monitor_events(&self) -> bool {
let mut failed_channels = Vec::new();
});
}
- let has_update = has_monitor_update || !failed_htlcs.is_empty();
+ let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
for (failures, channel_id) in failed_htlcs.drain(..) {
self.fail_holding_cell_htlcs(failures, channel_id);
}
has_update
}
+ /// Check whether any channels have finished removing all pending updates after a shutdown
+ /// exchange and can now send a closing_signed.
+ /// Returns whether any closing_signed messages were generated.
+ fn maybe_generate_initial_closing_signed(&self) -> bool {
+ let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
+ let mut has_update = false;
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+
+ by_id.retain(|channel_id, chan| {
+ match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+ Ok((msg_opt, tx_opt)) => {
+ if let Some(msg) = msg_opt {
+ has_update = true;
+ pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: chan.get_counterparty_node_id(), msg,
+ });
+ }
+ if let Some(tx) = tx_opt {
+ // We're done with this channel. We got a closing_signed and sent back
+ // a closing_signed with a closing transaction to broadcast.
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+
+ log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ false
+ } else { true }
+ },
+ Err(e) => {
+ has_update = true;
+ let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ !close_channel
+ }
+ }
+ });
+ }
+
+ for (counterparty_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
+ }
+
+ has_update
+ }
+
/// Handle a list of channel failures during a block_connected or block_disconnected call,
/// pushing the channel monitor update (if any) to the background events queue and removing the
/// Channel object.
if self.check_free_holding_cells() {
result = NotifyOption::DoPersist;
}
+ if self.maybe_generate_initial_closing_signed() {
+ result = NotifyOption::DoPersist;
+ }
let mut pending_events = Vec::new();
let mut channel_state = self.channel_state.lock().unwrap();
use bitcoin::hashes::sha256::Hash as Sha256;
use core::time::Duration;
use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+ use ln::channelmanager::PaymentSendFailure;
use ln::features::{InitFeatures, InvoiceFeatures};
use ln::functional_test_utils::*;
use ln::msgs;
use ln::msgs::ChannelMessageHandler;
use routing::router::{get_keysend_route, get_route};
+ use util::errors::APIError;
use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use util::test_utils;
nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
}
+
+ #[test]
+ fn test_multi_hop_missing_secret() {
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let logger = test_utils::TestLogger::new();
+
+ // Marshal an MPP route.
+ let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
+ let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+ let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+ let path = route.paths[0].clone();
+ route.paths.push(path);
+ route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+ route.paths[0][0].short_channel_id = chan_1_id;
+ route.paths[0][1].short_channel_id = chan_3_id;
+ route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+ route.paths[1][0].short_channel_id = chan_2_id;
+ route.paths[1][1].short_channel_id = chan_4_id;
+
+ match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
+ PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
+ assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) },
+ _ => panic!("unexpected error")
+ }
+ }
}
#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
use ln::features::{InitFeatures, InvoiceFeatures};
use ln::functional_test_utils::*;
- use ln::msgs::ChannelMessageHandler;
+ use ln::msgs::{ChannelMessageHandler, Init};
use routing::network_graph::NetworkGraph;
use routing::router::get_route;
use util::test_utils;
});
let node_b_holder = NodeHolder { node: &node_b };
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
pub keys_manager: &'a test_utils::TestKeysInterface,
pub logger: &'a test_utils::TestLogger,
pub node_seed: [u8; 32],
+ pub features: InitFeatures,
}
pub struct Node<'a, 'b: 'a, 'c: 'b> {
}
}
+#[cfg(test)]
+macro_rules! get_channel_ref {
+ ($node: expr, $lock: ident, $channel_id: expr) => {
+ {
+ $lock = $node.node.channel_state.lock().unwrap();
+ $lock.by_id.get_mut(&$channel_id).unwrap()
+ }
+ }
+}
+
#[cfg(test)]
macro_rules! get_feerate {
($node: expr, $channel_id: expr) => {
{
- let chan_lock = $node.node.channel_state.lock().unwrap();
- let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+ let mut lock;
+ let chan = get_channel_ref!($node, lock, $channel_id);
chan.get_feerate()
}
}
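With channel access now funneled through `get_channel_ref!` (which hands back a mutable reference under the `channel_state` lock), `get_feerate!` keeps its original call shape in tests. A hypothetical usage, assuming the usual functional-test helpers:

```rust
// Sketch of test-side usage; `nodes` comes from create_network and the channel from
// create_announced_chan_between_nodes, as elsewhere in these tests.
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
let feerate = get_feerate!(nodes[0], chan.2); // chan.2 is the channel_id
assert!(feerate > 0);
```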
pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
- let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
+ let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) };
let (tx_a, tx_b);
node_a.close_channel(channel_id).unwrap();
let (as_update, bs_update) = if close_inbound_first {
assert!(node_a.get_and_clear_pending_msg_events().is_empty());
node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());
- assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
- tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
- let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
- node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap());
- let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
- assert!(none_b.is_none());
+ node_b.handle_closing_signed(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()));
assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
+ let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
+
+ node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());
+ let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
+ assert!(none_a.is_none());
+ assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
+ tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
(as_update, bs_update)
} else {
let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());
node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a);
- assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
- tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
- let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
- node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());
- let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
- assert!(none_a.is_none());
+ node_a.handle_closing_signed(&node_b.get_our_node_id(), &get_event_msg!(struct_b, MessageSendEvent::SendClosingSigned, node_a.get_our_node_id()));
assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
+ let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
+
+ node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap());
+ let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
+ assert!(none_b.is_none());
+ assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
+ tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
(as_update, bs_update)
};
assert_eq!(tx_a, tx_b);
for i in 0..node_count {
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[i].chain_source), &chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator, &chanmon_cfgs[i].persister, &chanmon_cfgs[i].keys_manager);
let seed = [i as u8; 32];
- nodes.push(NodeCfg { chain_source: &chanmon_cfgs[i].chain_source, logger: &chanmon_cfgs[i].logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chain_monitor, keys_manager: &chanmon_cfgs[i].keys_manager, node_seed: seed });
+ nodes.push(NodeCfg {
+ chain_source: &chanmon_cfgs[i].chain_source,
+ logger: &chanmon_cfgs[i].logger,
+ tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster,
+ fee_estimator: &chanmon_cfgs[i].fee_estimator,
+ chain_monitor,
+ keys_manager: &chanmon_cfgs[i].keys_manager,
+ node_seed: seed,
+ features: InitFeatures::known(),
+ });
}
nodes
// When most of our tests were written, the default HTLC minimum was fixed at 1000.
// It now defaults to 1, so we simply set it to the expected value here.
default_config.own_channel_config.our_htlc_minimum_msat = 1000;
+ // When most of our tests were written, we didn't have the notion of a `max_dust_htlc_exposure_msat`.
+ // It now defaults to 5_000_000 msat; to avoid interfering with tests we bump it to 50_000_000 msat.
+ default_config.channel_options.max_dust_htlc_exposure_msat = 50_000_000;
default_config
}
for i in 0..node_count {
for j in (i+1)..node_count {
- nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
- nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone() });
+ nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone() });
}
}
use ln::functional_test_utils::*;
use ln::chan_utils::CommitmentTransaction;
-use ln::msgs::OptionalField::Present;
#[test]
fn test_insane_channel_opens() {
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
let logger = test_utils::TestLogger::new();
- let channel_id = chan.2;
// balancing
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
// (6) RAA is delivered ->
// First nodes[0] generates an update_fee
- nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 20;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let channel_id = chan.2;
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
let logger = test_utils::TestLogger::new();
// balancing
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
// First nodes[0] generates an update_fee
- nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 20;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let channel_id = chan.2;
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
// A B
// update_fee/commitment_signed ->
// revoke_and_ack ->
// First nodes[0] generates an update_fee
- let initial_feerate = get_feerate!(nodes[0], channel_id);
- nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
+ let initial_feerate;
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ initial_feerate = *feerate_lock;
+ *feerate_lock = initial_feerate + 20;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
// transaction:
- nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = initial_feerate + 40;
+ }
+ nodes[0].node.timer_tick_occurred();
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let channel_id = chan.2;
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let feerate = get_feerate!(nodes[0], channel_id);
- nodes[0].node.update_fee(channel_id, feerate+25).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 25;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
let channel_id = chan.2;
let feerate = 260;
- nodes[0].node.update_fee(channel_id, feerate).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = feerate;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
// Add 2 to the previous fee rate so that the final fee increases by 1 (with no HTLCs the fee is
// essentially fee_rate*(724/1000), so an increment of 1*0.724 is rounded back down)
- nodes[0].node.update_fee(channel_id, feerate+2).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = feerate + 2;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let channel_id = chan.2;
let logger = test_utils::TestLogger::new();
// balancing
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
- let feerate = get_feerate!(nodes[0], channel_id);
- nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 20;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
// revoke_and_ack ->
// Create and deliver (1)...
- let feerate = get_feerate!(nodes[0], channel_id);
- nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
+ let feerate;
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ feerate = *feerate_lock;
+ *feerate_lock = feerate + 20;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
check_added_monitors!(nodes[0], 1);
// Create and deliver (4)...
- nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = feerate + 30;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events_0.len(), 1);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
}
-#[test]
-fn pre_funding_lock_shutdown_test() {
- // Test sending a shutdown prior to funding_locked after funding generation
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known());
- mine_transaction(&nodes[0], &tx);
- mine_transaction(&nodes[1], &tx);
-
- nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
-
- let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
- let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
- let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
- assert!(node_0_none.is_none());
-
- assert!(nodes[0].node.list_channels().is_empty());
- assert!(nodes[1].node.list_channels().is_empty());
-}
-
-#[test]
-fn updates_shutdown_wait() {
- // Test sending a shutdown with outstanding updates pending
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
- let logger = test_utils::TestLogger::new();
-
- let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
-
- nodes[0].node.close_channel(&chan_1.2).unwrap();
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
-
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
- let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
-
- let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler;
- let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler;
- let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
- let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
- unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
- unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
-
- assert!(nodes[2].node.claim_funds(our_payment_preimage));
- check_added_monitors!(nodes[2], 1);
- let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
- assert!(updates.update_add_htlcs.is_empty());
- assert!(updates.update_fail_htlcs.is_empty());
- assert!(updates.update_fail_malformed_htlcs.is_empty());
- assert!(updates.update_fee.is_none());
- assert_eq!(updates.update_fulfill_htlcs.len(), 1);
- nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
- expect_payment_forwarded!(nodes[1], Some(1000), false);
- check_added_monitors!(nodes[1], 1);
- let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
-
- assert!(updates_2.update_add_htlcs.is_empty());
- assert!(updates_2.update_fail_htlcs.is_empty());
- assert!(updates_2.update_fail_malformed_htlcs.is_empty());
- assert!(updates_2.update_fee.is_none());
- assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
- nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
-
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentSent { ref payment_preimage } => {
- assert_eq!(our_payment_preimage, *payment_preimage);
- },
- _ => panic!("Unexpected event"),
- }
-
- let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
- let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
- let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
- assert!(node_0_none.is_none());
-
- assert!(nodes[0].node.list_channels().is_empty());
-
- assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
- nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
- close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
- assert!(nodes[1].node.list_channels().is_empty());
- assert!(nodes[2].node.list_channels().is_empty());
-}
-
-#[test]
-fn htlc_fail_async_shutdown() {
- // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
- let logger = test_utils::TestLogger::new();
-
- let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
- let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
- let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
- nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
- check_added_monitors!(nodes[0], 1);
- let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
- assert_eq!(updates.update_add_htlcs.len(), 1);
- assert!(updates.update_fulfill_htlcs.is_empty());
- assert!(updates.update_fail_htlcs.is_empty());
- assert!(updates.update_fail_malformed_htlcs.is_empty());
- assert!(updates.update_fee.is_none());
-
- nodes[1].node.close_channel(&chan_1.2).unwrap();
- let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
-
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
- check_added_monitors!(nodes[1], 1);
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
-
- let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- assert!(updates_2.update_add_htlcs.is_empty());
- assert!(updates_2.update_fulfill_htlcs.is_empty());
- assert_eq!(updates_2.update_fail_htlcs.len(), 1);
- assert!(updates_2.update_fail_malformed_htlcs.is_empty());
- assert!(updates_2.update_fee.is_none());
-
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
-
- expect_payment_failed!(nodes[0], our_payment_hash, false);
-
- let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(msg_events.len(), 2);
- let node_0_closing_signed = match msg_events[0] {
- MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[1].node.get_our_node_id());
- (*msg).clone()
- },
- _ => panic!("Unexpected event"),
- };
- match msg_events[1] {
- MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
- assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
- },
- _ => panic!("Unexpected event"),
- }
-
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
- let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
- let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
- assert!(node_0_none.is_none());
-
- assert!(nodes[0].node.list_channels().is_empty());
-
- assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
- nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
- close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
- assert!(nodes[1].node.list_channels().is_empty());
- assert!(nodes[2].node.list_channels().is_empty());
-}
-
-fn do_test_shutdown_rebroadcast(recv_count: u8) {
- // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
- // messages delivered prior to disconnect
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
-
- let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
-
- nodes[1].node.close_channel(&chan_1.2).unwrap();
- let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- if recv_count > 0 {
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- if recv_count > 1 {
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- }
- }
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
-
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
- let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- assert!(node_1_shutdown == node_1_2nd_shutdown);
-
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish);
- let node_0_2nd_shutdown = if recv_count > 0 {
- let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
- node_0_2nd_shutdown
- } else {
- let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
- assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
- get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
- };
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown);
-
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
- assert!(nodes[2].node.claim_funds(our_payment_preimage));
- check_added_monitors!(nodes[2], 1);
- let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
- assert!(updates.update_add_htlcs.is_empty());
- assert!(updates.update_fail_htlcs.is_empty());
- assert!(updates.update_fail_malformed_htlcs.is_empty());
- assert!(updates.update_fee.is_none());
- assert_eq!(updates.update_fulfill_htlcs.len(), 1);
- nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
- expect_payment_forwarded!(nodes[1], Some(1000), false);
- check_added_monitors!(nodes[1], 1);
- let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
-
- assert!(updates_2.update_add_htlcs.is_empty());
- assert!(updates_2.update_fail_htlcs.is_empty());
- assert!(updates_2.update_fail_malformed_htlcs.is_empty());
- assert!(updates_2.update_fee.is_none());
- assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
- nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
-
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentSent { ref payment_preimage } => {
- assert_eq!(our_payment_preimage, *payment_preimage);
- },
- _ => panic!("Unexpected event"),
- }
-
- let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
- if recv_count > 0 {
- nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
- let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- assert!(node_1_closing_signed.is_some());
- }
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- if recv_count == 0 {
- // If all closing_signeds weren't delivered we can just resume where we left off...
- let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
-
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish);
- let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
-
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish);
- let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
-
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown);
- let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
- assert!(node_0_closing_signed == node_0_2nd_closing_signed);
-
- nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed);
- let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
- let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
- assert!(node_0_none.is_none());
- } else {
- // If one node, however, received and responded with an identical closing_signed, we end
- // up erroring and nodes[0] will try to broadcast its own latest commitment transaction.
- // There isn't really anything better we can simply do, but in the future we might
- // explore storing a set of recently-closed channels that got disconnected during
- // closing_signed and avoiding broadcasting local commitment txn for some timeout to
- // give our counterparty enough time to (potentially) broadcast a cooperative closing
- // transaction.
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish);
- let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(msg_events.len(), 1);
- if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
- match action {
- &ErrorAction::SendErrorMessage { ref msg } => {
- nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
- assert_eq!(msg.channel_id, chan_1.2);
- },
- _ => panic!("Unexpected event!"),
- }
- } else { panic!("Needed SendErrorMessage close"); }
-
- // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
- // checks it, but in this case nodes[0] didn't ever get a chance to receive a
- // closing_signed so we do it ourselves
- check_closed_broadcast!(nodes[0], false);
- check_added_monitors!(nodes[0], 1);
- }
-
- assert!(nodes[0].node.list_channels().is_empty());
-
- assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
- nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
- close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
- assert!(nodes[1].node.list_channels().is_empty());
- assert!(nodes[2].node.list_channels().is_empty());
-}
-
-#[test]
-fn test_shutdown_rebroadcast() {
- do_test_shutdown_rebroadcast(0);
- do_test_shutdown_rebroadcast(1);
- do_test_shutdown_rebroadcast(2);
-}
-
#[test]
fn fake_network_test() {
// Simple test which builds a network of ChannelManagers, connects them to each other, and
bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
excess_data: Vec::new(),
- };
+ }
}
}
let seed = [42; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
- let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, node_seed: seed };
+ let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, node_seed: seed, features: InitFeatures::known() };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
node_cfgs.insert(0, node);
// First nodes[0] generates an update_fee, setting the channel's
// pending_update_fee.
- nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 20).unwrap();
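+ // Fees on outbound channels are now updated automatically: raise the mocked fee
+ // estimator's rate and let `timer_tick_occurred` generate the `update_fee` (see #985).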
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 20;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_msg_events();
// First nodes[0] generates an update_fee, setting the channel's
// pending_update_fee.
- nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 200).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 200;
+ }
+ nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_msg_events();
// First nodes[1] generates an update_fee, setting the channel's
// pending_update_fee.
- nodes[1].node.update_fee(chan_1_2.2, get_feerate!(nodes[1], chan_1_2.2) + 20).unwrap();
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock += 20;
+ }
+ nodes[1].node.timer_tick_occurred();
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
do_test_sweep_outbound_htlc_failure_update(true, false);
}
-#[test]
-fn test_upfront_shutdown_script() {
- // BOLT 2: option upfront shutdown script; if a peer commits to its closing_script at channel
- // opening, enforce it in the shutdown message
-
- let mut config = UserConfig::default();
- config.channel_options.announced_channel = true;
- config.peer_channel_config_limits.force_announced_channel_preference = false;
- config.channel_options.commit_upfront_shutdown_pubkey = false;
- let user_cfgs = [None, Some(config), None];
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
- let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
- // We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign
- let flags = InitFeatures::known();
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
- node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
- // Test that we enforce the upfront_scriptpubkey: providing a different one at closing gets the peer disconnected
- nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
- check_added_monitors!(nodes[2], 1);
-
- // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
- // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
- nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- let events = nodes[2].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
- _ => panic!("Unexpected event"),
- }
-
- // We test that in case of a peer not signaling, we don't enforce the committed script at channel opening
- let flags_no = InitFeatures::known().clear_upfront_shutdown_script();
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
- _ => panic!("Unexpected event"),
- }
-
- // We test that if the user opts out, we provide a zero-length script at channel opening and are
- // able to close the channel smoothly; here the opt-out is from the channel initiator
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
- _ => panic!("Unexpected event"),
- }
-
- // We test that if the user opts out, we provide a zero-length script at channel opening and are
- // able to close the channel smoothly
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- match events[0] {
- MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
- _ => panic!("Unexpected event"),
- }
- match events[1] {
- MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
- _ => panic!("Unexpected event"),
- }
-}
-
-#[test]
-fn test_upfront_shutdown_script_unsupport_segwit() {
- // We test that the channel is closed early if a segwit program is passed as the upfront
- // shutdown script but the peer does not support segwit.
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
-
- let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(16)
- .push_slice(&[0, 0])
- .into_script());
-
- let features = InitFeatures::known().clear_shutdown_anysegwit();
- nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), features, &open_channel);
-
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
- assert_eq!(node_id, nodes[0].node.get_our_node_id());
- assert!(regex::Regex::new(r"Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: (\([A-Fa-f0-9]+\))").unwrap().is_match(&*msg.data));
- },
- _ => panic!("Unexpected event"),
- }
-}
-
-#[test]
-fn test_shutdown_script_any_segwit_allowed() {
- let mut config = UserConfig::default();
- config.channel_options.announced_channel = true;
- config.peer_channel_config_limits.force_announced_channel_preference = false;
- config.channel_options.commit_upfront_shutdown_pubkey = false;
- let user_cfgs = [None, Some(config), None];
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
- let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
- // We test that if the remote peer accepts opt_shutdown_anysegwit, a witness program can be used on shutdown
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
- .push_slice(&[0, 0])
- .into_script();
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- match events[0] {
- MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
- _ => panic!("Unexpected event"),
- }
- match events[1] {
- MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
- _ => panic!("Unexpected event"),
- }
-}
-
-#[test]
-fn test_shutdown_script_any_segwit_not_allowed() {
- let mut config = UserConfig::default();
- config.channel_options.announced_channel = true;
- config.peer_channel_config_limits.force_announced_channel_preference = false;
- config.channel_options.commit_upfront_shutdown_pubkey = false;
- let user_cfgs = [None, Some(config), None];
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
- let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
- // We test that if the remote peer does not accept opt_shutdown_anysegwit, the witness program cannot be used on shutdown
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- // Make a witness program with a non-zero segwit version
- node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
- .push_slice(&[0, 0])
- .into_script();
- let flags_no = InitFeatures::known().clear_shutdown_anysegwit();
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &flags_no, &node_0_shutdown);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- match events[1] {
- MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
- assert_eq!(node_id, nodes[1].node.get_our_node_id());
- assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020000) from remote peer".to_owned())
- },
- _ => panic!("Unexpected event"),
- }
- check_added_monitors!(nodes[0], 1);
-}
-
-#[test]
-fn test_shutdown_script_segwit_but_not_anysegwit() {
- let mut config = UserConfig::default();
- config.channel_options.announced_channel = true;
- config.peer_channel_config_limits.force_announced_channel_preference = false;
- config.channel_options.commit_upfront_shutdown_pubkey = false;
- let user_cfgs = [None, Some(config), None];
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
- let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
- // We test that even when opt_shutdown_anysegwit is supported, a version-0 witness program with an invalid length is not accepted
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
- let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- // Make a v0 segwit script with an invalid (2-byte) witness program
- node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
- .push_slice(&[0, 0])
- .into_script();
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- match events[1] {
- MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
- assert_eq!(node_id, nodes[1].node.get_our_node_id());
- assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned())
- },
- _ => panic!("Unexpected event"),
- }
- check_added_monitors!(nodes[0], 1);
-}
-
#[test]
fn test_user_configurable_csv_delay() {
// We test our channel constructors yield errors when we pass them absurd csv delay
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
- if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
+ if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0, &low_our_to_self_config) {
match error {
APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
+ if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
match error {
ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
+ if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
match error {
ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
- if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
+ if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
} else { assert!(false); }
assert_eq!(updates.update_add_htlcs.len(), 1);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
- if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
+ if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Watchtower Alice should already have seen the block and reject the update
if let Err(_) = watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
claim_payment(&nodes[0], &path, test_preimage);
}
+
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, on_holder_tx: bool) {
+ // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` policy.
+ //
+ // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
+ // trimmed-to-dust HTLC outbound balance and this new payment as included on next counterparty
+ // commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the update.
+ // At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC inbound
+ // and trimmed-to-dust HTLC outbound balance and this new received HTLC as included on next
+ // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail the update.
+ // Note that we return a `temporary_channel_failure` (0x1000 | 7), as the channel might be
+ // available again for HTLC processing once the dust bandwidth has cleared up.
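+ // (An illustrative sketch of this exposure bookkeeping follows `test_max_dust_htlc_exposure` below.)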
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut config = test_default_channel_config();
+ config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+ let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ open_channel.max_htlc_value_in_flight_msat = 50_000_000;
+ open_channel.max_accepted_htlcs = 60;
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+ let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ if on_holder_tx {
+ accept_channel.dust_limit_satoshis = 660;
+ }
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42);
+
+ if on_holder_tx {
+ if let Some(mut chan) = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
+ chan.holder_dust_limit_satoshis = 660;
+ }
+ }
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ check_added_monitors!(nodes[0], 1);
+
+ let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+ update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
+
+ if on_holder_tx {
+ if dust_outbound_balance {
+ for i in 0..2 {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 2_300_000);
+ if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
+ }
+ } else {
+ for _ in 0..2 {
+ route_payment(&nodes[0], &[&nodes[1]], 2_300_000);
+ }
+ }
+ } else {
+ if dust_outbound_balance {
+ for i in 0..25 {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 200_000); // + 177_000 msat of HTLC-success tx at 253 sats/kWU
+ if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
+ }
+ } else {
+ for _ in 0..25 {
+ route_payment(&nodes[0], &[&nodes[1]], 200_000); // + 167_000 msat of HTLC-timeout tx at 253 sats/kWU
+ }
+ }
+ }
+
+ if at_forward {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { 2_300_000 } else { 200_000 });
+ let mut config = UserConfig::default();
+ if on_holder_tx {
+ unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat)));
+ } else {
+ unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat)));
+ }
+ } else {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { 2_300_000 } else { 200_000 });
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ if on_holder_tx {
+ nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
+ } else {
+ nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
+ }
+ }
+
+ let _ = nodes[1].node.get_and_clear_pending_msg_events();
+ let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+ added_monitors.clear();
+}
+
+#[test]
+fn test_max_dust_htlc_exposure() {
+ do_test_max_dust_htlc_exposure(true, true, true);
+ do_test_max_dust_htlc_exposure(false, true, true);
+ do_test_max_dust_htlc_exposure(false, false, true);
+ do_test_max_dust_htlc_exposure(false, false, false);
+ do_test_max_dust_htlc_exposure(true, true, false);
+ do_test_max_dust_htlc_exposure(true, false, false);
+ do_test_max_dust_htlc_exposure(true, false, true);
+ do_test_max_dust_htlc_exposure(false, true, false);
+}
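+
+// Illustrative sketch only, not part of this change: the bookkeeping the test
+// above exercises, with hypothetical names (`would_exceed_dust_exposure` is
+// not an LDK API). An HTLC counts as trimmed-to-dust when it would produce a
+// below-dust-limit output on the commitment transaction; the running sum of
+// such HTLCs, plus the candidate HTLC, must not exceed the configured limit.
+fn would_exceed_dust_exposure(trimmed_inbound_msat: u64, trimmed_outbound_msat: u64,
+	candidate_htlc_msat: u64, max_dust_htlc_exposure_msat: u64) -> bool {
+	// Saturating adds keep the sketch panic-free on absurd inputs.
+	trimmed_inbound_msat.saturating_add(trimmed_outbound_msat)
+		.saturating_add(candidate_htlc_msat) > max_dust_htlc_exposure_msat
+}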
pub mod peer_handler;
pub mod chan_utils;
pub mod features;
+pub mod script;
#[cfg(feature = "fuzztarget")]
pub mod peer_channel_encryptor;
#[cfg(not(feature = "fuzztarget"))]
pub(crate) mod peer_channel_encryptor;
+#[cfg(feature = "fuzztarget")]
+pub mod channel;
+#[cfg(not(feature = "fuzztarget"))]
mod channel;
+
mod onion_utils;
mod wire;
#[allow(unused_mut)]
mod chanmon_update_fail_tests;
#[cfg(test)]
+#[allow(unused_mut)]
mod reorg_tests;
#[cfg(test)]
#[allow(unused_mut)]
mod onion_route_tests;
+#[cfg(test)]
+#[allow(unused_mut)]
+mod monitor_tests;
+#[cfg(test)]
+#[allow(unused_mut)]
+mod shutdown_tests;
pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN;
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Further functional tests which test ChannelMonitor behavior.
+
+use chain::channelmonitor::ANTI_REORG_DELAY;
+use ln::{PaymentPreimage, PaymentHash};
+use ln::features::InitFeatures;
+use ln::msgs::{ChannelMessageHandler, HTLCFailChannelUpdate, ErrorAction};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use routing::router::get_route;
+
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::Hash;
+
+use prelude::*;
+
+use ln::functional_test_utils::*;
+
+#[test]
+fn chanmon_fail_from_stale_commitment() {
+ // If we forward an HTLC to our counterparty, but we force-closed the channel before our
+ // counterparty provides us an updated commitment transaction, we'll end up with a commitment
+ // transaction that does not contain the HTLC which we attempted to forward. In this case, we
+ // need to wait `ANTI_REORG_DELAY` blocks and then fail back the HTLC as there is no way for us
+ // to learn the preimage and the confirmed commitment transaction paid us the value of the
+ // HTLC.
+ //
+ // However, previously, we did not do this, ignoring the HTLC entirely.
+ //
+ // This could lead to channel closure if the sender we received the HTLC from decides to go on
+ // chain to get their HTLC back before it times out.
+ //
+ // Here, we check exactly this case, forwarding a payment from A, through B, to C, before B
+ // broadcasts its latest commitment transaction, which should result in it eventually failing
+ // the HTLC back off-chain to A.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let (update_a, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let bs_txn = get_local_commitment_txn!(nodes[1], chan_id_2);
+
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment
+ // transaction for nodes[1].
+ mine_transaction(&nodes[1], &bs_txn[0]);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_broadcast!(nodes[1], true);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], payment_hash, false);
+ expect_payment_failure_chan_update!(nodes[0], update_a.contents.short_channel_id, true);
+}
pub scriptpubkey: Script,
}
+/// The minimum and maximum fees which the sender is willing to place on the closing transaction.
+/// This is provided in [`ClosingSigned`] by both sides to indicate the fee range they are willing
+/// to use.
+#[derive(Clone, Debug, PartialEq)]
+pub struct ClosingSignedFeeRange {
+ /// The minimum absolute fee, in satoshis, which the sender is willing to place on the closing
+ /// transaction.
+ pub min_fee_satoshis: u64,
+ /// The maximum absolute fee, in satoshis, which the sender is willing to place on the closing
+ /// transaction.
+ pub max_fee_satoshis: u64,
+}
+
/// A closing_signed message to be sent or received from a peer
#[derive(Clone, Debug, PartialEq)]
pub struct ClosingSigned {
pub fee_satoshis: u64,
/// A signature on the closing transaction
pub signature: Signature,
+ /// The minimum and maximum fees which the sender is willing to accept, provided only by new
+ /// nodes.
+ pub fee_range: Option<ClosingSignedFeeRange>,
}
/// An update_add_htlc message to be sent or received from a peer
}
}
-impl_writeable!(ClosingSigned, 32+8+64, {
- channel_id,
- fee_satoshis,
- signature
+impl Writeable for ClosingSigned {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ w.size_hint(32 + 8 + 64 + if self.fee_range.is_some() { 1 + 1 + 2*8 } else { 0 });
+ self.channel_id.write(w)?;
+ self.fee_satoshis.write(w)?;
+ self.signature.write(w)?;
+ encode_tlv_stream!(w, {
+ (1, self.fee_range, option),
+ });
+ Ok(())
+ }
+}
+
+impl Readable for ClosingSigned {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let channel_id = Readable::read(r)?;
+ let fee_satoshis = Readable::read(r)?;
+ let signature = Readable::read(r)?;
+ let mut fee_range = None;
+ decode_tlv_stream!(r, {
+ (1, fee_range, option),
+ });
+ Ok(Self { channel_id, fee_satoshis, signature, fee_range })
+ }
+}
+
+impl_writeable!(ClosingSignedFeeRange, 2*8, {
+ min_fee_satoshis,
+ max_fee_satoshis
});
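+
+// Illustrative round-trip, not part of this change: `fee_range` rides in a
+// trailing TLV stream with odd type 1, so peers which do not understand it
+// can safely skip the extra bytes while newer peers recover it losslessly.
+// Assuming a valid `signature` is in scope (hypothetical example):
+//
+//     let msg = msgs::ClosingSigned {
+//         channel_id: [2; 32], fee_satoshis: 42, signature,
+//         fee_range: Some(msgs::ClosingSignedFeeRange {
+//             min_fee_satoshis: 10, max_fee_satoshis: 1_000,
+//         }),
+//     };
+//     let decoded = msgs::ClosingSigned::read(&mut Cursor::new(&msg.encode())).unwrap();
+//     assert_eq!(decoded, msg);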
impl_writeable_len_match!(CommitmentSigned, {
channel_id: [2; 32],
fee_satoshis: 2316138423780173,
signature: sig_1,
+ fee_range: None,
};
let encoded_value = closing_signed.encode();
let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
assert_eq!(encoded_value, target_value);
+ assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value)).unwrap(), closing_signed);
+
+ let closing_signed_with_range = msgs::ClosingSigned {
+ channel_id: [2; 32],
+ fee_satoshis: 2316138423780173,
+ signature: sig_1,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: 0xdeadbeef,
+ max_fee_satoshis: 0x1badcafe01234567,
+ }),
+ };
+ let encoded_value_with_range = closing_signed_with_range.encode();
+ let target_value_with_range = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a011000000000deadbeef1badcafe01234567").unwrap();
+ assert_eq!(encoded_value_with_range, target_value_with_range);
+ assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value_with_range)).unwrap(),
+ closing_signed_with_range);
}
#[test]
match self.do_read_event(peer_descriptor, data) {
Ok(res) => Ok(res),
Err(e) => {
+ log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error");
self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
Err(e)
}
},
}
}
- };
+ }
}
}
Some(peer) => {
match peer.their_node_id {
Some(node_id) => {
+ log_trace!(self.logger,
+ "Handling disconnection of peer {}, with {}future connection to the peer possible.",
+ log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
peers.node_id_to_descriptor.remove(&node_id);
self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
},
}
}
- /// This function should be called roughly once every 30 seconds.
- /// It will send pings to each peer and disconnect those which did not respond to the last
- /// round of pings.
+ /// Send pings to each peer and disconnect those which did not respond to the last round of
+ /// pings.
+ ///
+ /// This may be called on any timescale you want, though roughly once every five to ten
+ /// seconds is preferred. The call rate determines both how often we send a ping to our peers
+ /// and how much time they have to respond before we disconnect them.
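+ ///
+ /// A minimal sketch of driving this from a dedicated thread (illustrative
+ /// only; assumes a `std` environment and `peer_manager: Arc<PeerManager<..>>`):
+ ///
+ /// ```ignore
+ /// let pm = Arc::clone(&peer_manager);
+ /// std::thread::spawn(move || loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(5));
+ ///     pm.timer_tick_occurred();
+ /// });
+ /// ```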
///
/// May call [`send_data`] on all [`SocketDescriptor`]s. Thus, be very careful with reentrancy
/// issues!
--- /dev/null
+//! Abstractions for scripts used in the Lightning Network.
+
+use bitcoin::blockdata::opcodes::all::OP_PUSHBYTES_0 as SEGWIT_V0;
+use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::hashes::Hash;
+use bitcoin::hash_types::{PubkeyHash, ScriptHash, WPubkeyHash, WScriptHash};
+use bitcoin::secp256k1::key::PublicKey;
+
+use ln::features::InitFeatures;
+use ln::msgs::DecodeError;
+use util::ser::{Readable, Writeable, Writer};
+
+use core::convert::TryFrom;
+use core::num::NonZeroU8;
+use io;
+
+/// A script pubkey for shutting down a channel as defined by [BOLT #2].
+///
+/// [BOLT #2]: https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md
+#[derive(Clone)]
+pub struct ShutdownScript(ShutdownScriptImpl);
+
+/// An error occurring when converting from [`Script`] to [`ShutdownScript`].
+#[derive(Debug)]
+pub struct InvalidShutdownScript {
+ /// The script that did not meet the requirements from [BOLT #2].
+ ///
+ /// [BOLT #2]: https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md
+ pub script: Script
+}
+
+#[derive(Clone)]
+enum ShutdownScriptImpl {
+ /// [`PublicKey`] used to form a P2WPKH script pubkey. Used to support backward-compatible
+ /// serialization.
+ Legacy(PublicKey),
+
+ /// [`Script`] adhering to a script pubkey format specified in BOLT #2.
+ Bolt2(Script),
+}
+
+impl Writeable for ShutdownScript {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.0.write(w)
+ }
+
+ fn serialized_length(&self) -> usize {
+ self.0.serialized_length()
+ }
+}
+
+impl Readable for ShutdownScript {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(ShutdownScript(ShutdownScriptImpl::read(r)?))
+ }
+}
+
+impl_writeable_tlv_based_enum!(ShutdownScriptImpl, ;
+ (0, Legacy),
+ (1, Bolt2),
+);
+
+impl ShutdownScript {
+ /// Generates a P2WPKH script pubkey from the given [`PublicKey`].
+ pub(crate) fn new_p2wpkh_from_pubkey(pubkey: PublicKey) -> Self {
+ Self(ShutdownScriptImpl::Legacy(pubkey))
+ }
+
+ /// Generates a P2PKH script pubkey from the given [`PubkeyHash`].
+ pub fn new_p2pkh(pubkey_hash: &PubkeyHash) -> Self {
+ Self(ShutdownScriptImpl::Bolt2(Script::new_p2pkh(pubkey_hash)))
+ }
+
+ /// Generates a P2SH script pubkey from the given [`ScriptHash`].
+ pub fn new_p2sh(script_hash: &ScriptHash) -> Self {
+ Self(ShutdownScriptImpl::Bolt2(Script::new_p2sh(script_hash)))
+ }
+
+ /// Generates a P2WPKH script pubkey from the given [`WPubkeyHash`].
+ pub fn new_p2wpkh(pubkey_hash: &WPubkeyHash) -> Self {
+ Self(ShutdownScriptImpl::Bolt2(Script::new_v0_wpkh(pubkey_hash)))
+ }
+
+ /// Generates a P2WSH script pubkey from the given [`WScriptHash`].
+ pub fn new_p2wsh(script_hash: &WScriptHash) -> Self {
+ Self(ShutdownScriptImpl::Bolt2(Script::new_v0_wsh(script_hash)))
+ }
+
+ /// Generates a witness script pubkey from the given segwit version and program.
+ ///
+ /// Note that for version-zero witness scripts you must use [`ShutdownScript::new_p2wpkh`] or
+ /// [`ShutdownScript::new_p2wsh`] instead.
+ ///
+ /// # Errors
+ ///
+ /// This function may return an error if `program` is invalid for the segwit `version`.
+ pub fn new_witness_program(version: NonZeroU8, program: &[u8]) -> Result<Self, InvalidShutdownScript> {
+ let script = Builder::new()
+ .push_int(version.get().into())
+ .push_slice(&program)
+ .into_script();
+ Self::try_from(script)
+ }
+
+ /// Converts the shutdown script into the underlying [`Script`].
+ pub fn into_inner(self) -> Script {
+ self.into()
+ }
+
+ /// Returns the [`PublicKey`] used for a P2WPKH shutdown script if constructed directly from it.
+ pub fn as_legacy_pubkey(&self) -> Option<&PublicKey> {
+ match &self.0 {
+ ShutdownScriptImpl::Legacy(pubkey) => Some(pubkey),
+ ShutdownScriptImpl::Bolt2(_) => None,
+ }
+ }
+
+ /// Returns whether the shutdown script is compatible with the features as defined by BOLT #2.
+ ///
+ /// Specifically, checks for compliance with feature `option_shutdown_anysegwit`.
+ pub fn is_compatible(&self, features: &InitFeatures) -> bool {
+ match &self.0 {
+ ShutdownScriptImpl::Legacy(_) => true,
+ ShutdownScriptImpl::Bolt2(script) => is_bolt2_compliant(script, features),
+ }
+ }
+}
+
+fn is_bolt2_compliant(script: &Script, features: &InitFeatures) -> bool {
+ if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wpkh() || script.is_v0_p2wsh() {
+ true
+ } else if features.supports_shutdown_anysegwit() {
+ script.is_witness_program() && script.as_bytes()[0] != SEGWIT_V0.into_u8()
+ } else {
+ false
+ }
+}
+
+impl TryFrom<Script> for ShutdownScript {
+ type Error = InvalidShutdownScript;
+
+ fn try_from(script: Script) -> Result<Self, Self::Error> {
+ Self::try_from((script, &InitFeatures::known()))
+ }
+}
+
+impl TryFrom<(Script, &InitFeatures)> for ShutdownScript {
+ type Error = InvalidShutdownScript;
+
+ fn try_from((script, features): (Script, &InitFeatures)) -> Result<Self, Self::Error> {
+ if is_bolt2_compliant(&script, features) {
+ Ok(Self(ShutdownScriptImpl::Bolt2(script)))
+ } else {
+ Err(InvalidShutdownScript { script })
+ }
+ }
+}
+
+impl Into<Script> for ShutdownScript {
+ fn into(self) -> Script {
+ match self.0 {
+ ShutdownScriptImpl::Legacy(pubkey) =>
+ Script::new_v0_wpkh(&WPubkeyHash::hash(&pubkey.serialize())),
+ ShutdownScriptImpl::Bolt2(script_pubkey) => script_pubkey,
+ }
+ }
+}
+
+impl core::fmt::Display for ShutdownScript {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ match &self.0 {
+ ShutdownScriptImpl::Legacy(_) => self.clone().into_inner().fmt(f),
+ ShutdownScriptImpl::Bolt2(script) => script.fmt(f),
+ }
+ }
+}
+
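+// Illustrative usage, not part of this change (`script` and `their_features`
+// are hypothetical bindings): converting a peer-provided script into a
+// `ShutdownScript`, honoring the peer's advertised features.
+//
+//     match ShutdownScript::try_from((script, &their_features)) {
+//         Ok(shutdown_script) => { /* safe to place in the `shutdown` message */ }
+//         Err(InvalidShutdownScript { script }) => { /* reject: not BOLT #2 compliant */ }
+//     }
+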
+#[cfg(test)]
+mod shutdown_script_tests {
+ use super::ShutdownScript;
+ use bitcoin::bech32::u5;
+ use bitcoin::blockdata::opcodes;
+ use bitcoin::blockdata::script::{Builder, Script};
+ use bitcoin::secp256k1::Secp256k1;
+ use bitcoin::secp256k1::key::{PublicKey, SecretKey};
+ use ln::features::InitFeatures;
+ use core::convert::TryFrom;
+ use core::num::NonZeroU8;
+
+ fn pubkey() -> bitcoin::util::ecdsa::PublicKey {
+ let secp_ctx = Secp256k1::signing_only();
+ let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]).unwrap();
+ bitcoin::util::ecdsa::PublicKey::new(PublicKey::from_secret_key(&secp_ctx, &secret_key))
+ }
+
+ fn redeem_script() -> Script {
+ let pubkey = pubkey();
+ Builder::new()
+ .push_opcode(opcodes::all::OP_PUSHNUM_2)
+ .push_key(&pubkey)
+ .push_key(&pubkey)
+ .push_opcode(opcodes::all::OP_PUSHNUM_2)
+ .push_opcode(opcodes::all::OP_CHECKMULTISIG)
+ .into_script()
+ }
+
+ #[test]
+ fn generates_p2wpkh_from_pubkey() {
+ let pubkey = pubkey();
+ let pubkey_hash = pubkey.wpubkey_hash().unwrap();
+ let p2wpkh_script = Script::new_v0_wpkh(&pubkey_hash);
+
+ let shutdown_script = ShutdownScript::new_p2wpkh_from_pubkey(pubkey.key);
+ assert!(shutdown_script.is_compatible(&InitFeatures::known()));
+ assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
+ assert_eq!(shutdown_script.into_inner(), p2wpkh_script);
+ }
+
+ #[test]
+ fn generates_p2pkh_from_pubkey_hash() {
+ let pubkey_hash = pubkey().pubkey_hash();
+ let p2pkh_script = Script::new_p2pkh(&pubkey_hash);
+
+ let shutdown_script = ShutdownScript::new_p2pkh(&pubkey_hash);
+ assert!(shutdown_script.is_compatible(&InitFeatures::known()));
+ assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
+ assert_eq!(shutdown_script.into_inner(), p2pkh_script);
+ assert!(ShutdownScript::try_from(p2pkh_script).is_ok());
+ }
+
+ #[test]
+ fn generates_p2sh_from_script_hash() {
+ let script_hash = redeem_script().script_hash();
+ let p2sh_script = Script::new_p2sh(&script_hash);
+
+ let shutdown_script = ShutdownScript::new_p2sh(&script_hash);
+ assert!(shutdown_script.is_compatible(&InitFeatures::known()));
+ assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
+ assert_eq!(shutdown_script.into_inner(), p2sh_script);
+ assert!(ShutdownScript::try_from(p2sh_script).is_ok());
+ }
+
+ #[test]
+ fn generates_p2wpkh_from_pubkey_hash() {
+ let pubkey_hash = pubkey().wpubkey_hash().unwrap();
+ let p2wpkh_script = Script::new_v0_wpkh(&pubkey_hash);
+
+ let shutdown_script = ShutdownScript::new_p2wpkh(&pubkey_hash);
+ assert!(shutdown_script.is_compatible(&InitFeatures::known()));
+ assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
+ assert_eq!(shutdown_script.into_inner(), p2wpkh_script);
+ assert!(ShutdownScript::try_from(p2wpkh_script).is_ok());
+ }
+
+ #[test]
+ fn generates_p2wsh_from_script_hash() {
+ let script_hash = redeem_script().wscript_hash();
+ let p2wsh_script = Script::new_v0_wsh(&script_hash);
+
+ let shutdown_script = ShutdownScript::new_p2wsh(&script_hash);
+ assert!(shutdown_script.is_compatible(&InitFeatures::known()));
+ assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
+ assert_eq!(shutdown_script.into_inner(), p2wsh_script);
+ assert!(ShutdownScript::try_from(p2wsh_script).is_ok());
+ }
+
+ #[test]
+ fn generates_segwit_from_non_v0_witness_program() {
+ let version = u5::try_from_u8(16).unwrap();
+ let witness_program = Script::new_witness_program(version, &[0; 40]);
+
+ let version = NonZeroU8::new(version.to_u8()).unwrap();
+ let shutdown_script = ShutdownScript::new_witness_program(version, &[0; 40]).unwrap();
+ assert!(shutdown_script.is_compatible(&InitFeatures::known()));
+ assert!(!shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
+ assert_eq!(shutdown_script.into_inner(), witness_program);
+ }
+
+ #[test]
+ fn fails_from_unsupported_script() {
+ let op_return = Script::new_op_return(&[0; 42]);
+ assert!(ShutdownScript::try_from(op_return).is_err());
+ }
+
+ #[test]
+ fn fails_from_invalid_segwit_version() {
+ let version = NonZeroU8::new(17).unwrap();
+ assert!(ShutdownScript::new_witness_program(version, &[0; 40]).is_err());
+ }
+
+ #[test]
+ fn fails_from_invalid_segwit_v0_witness_program() {
+ let witness_program = Script::new_witness_program(u5::try_from_u8(0).unwrap(), &[0; 2]);
+ assert!(ShutdownScript::try_from(witness_program).is_err());
+ }
+
+ #[test]
+ fn fails_from_invalid_segwit_non_v0_witness_program() {
+ let version = u5::try_from_u8(16).unwrap();
+ let witness_program = Script::new_witness_program(version, &[0; 42]);
+ assert!(ShutdownScript::try_from(witness_program).is_err());
+
+ let version = NonZeroU8::new(version.to_u8()).unwrap();
+ assert!(ShutdownScript::new_witness_program(version, &[0; 42]).is_err());
+ }
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Tests of our shutdown and closing_signed negotiation logic.
+
+use chain::keysinterface::KeysInterface;
+use chain::transaction::OutPoint;
+use ln::{PaymentPreimage, PaymentHash};
+use ln::channelmanager::PaymentSendFailure;
+use routing::router::get_route;
+use ln::features::{InitFeatures, InvoiceFeatures};
+use ln::msgs;
+use ln::msgs::{ChannelMessageHandler, ErrorAction};
+use ln::script::ShutdownScript;
+use util::test_utils;
+use util::test_utils::OnGetShutdownScriptpubkey;
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::errors::APIError;
+use util::config::UserConfig;
+
+use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::opcodes;
+
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::Hash;
+
+use regex;
+
+use core::default::Default;
+use core::num::NonZeroU8;
+
+use ln::functional_test_utils::*;
+use ln::msgs::OptionalField::Present;
+
+#[test]
+fn pre_funding_lock_shutdown_test() {
+ // Test sending a shutdown prior to funding_locked after funding generation
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known());
+ mine_transaction(&nodes[0], &tx);
+ mine_transaction(&nodes[1], &tx);
+
+ nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
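+ // With shutdown exchanged in both directions, the nodes now negotiate the closing
+ // fee via closing_signed until they agree and broadcast the closing transaction.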
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+ let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ assert!(node_1_none.is_none());
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ assert!(nodes[1].node.list_channels().is_empty());
+}
+
+#[test]
+fn updates_shutdown_wait() {
+ // Test sending a shutdown with outstanding updates pending
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let logger = test_utils::TestLogger::new();
+
+ let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+
+ nodes[0].node.close_channel(&chan_1.2).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
+
+ let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler;
+ let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler;
+ let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+ let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+ unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
+ unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
+
+ assert!(nodes[2].node.claim_funds(our_payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
+ check_added_monitors!(nodes[1], 1);
+ let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+ assert!(updates_2.update_add_htlcs.is_empty());
+ assert!(updates_2.update_fail_htlcs.is_empty());
+ assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+ assert!(updates_2.update_fee.is_none());
+ assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(our_payment_preimage, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+ let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ assert!(node_1_none.is_none());
+
+ assert!(nodes[0].node.list_channels().is_empty());
+
+ assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+ assert!(nodes[1].node.list_channels().is_empty());
+ assert!(nodes[2].node.list_channels().is_empty());
+}
+
+#[test]
+fn htlc_fail_async_shutdown() {
+ // Test that HTLCs fail when a shutdown starts, even if messages are delivered out-of-order
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let logger = test_utils::TestLogger::new();
+
+ let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
+ let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+ let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+ nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+
+ nodes[1].node.close_channel(&chan_1.2).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
+
+ let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates_2.update_add_htlcs.is_empty());
+ assert!(updates_2.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates_2.update_fail_htlcs.len(), 1);
+ assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+ assert!(updates_2.update_fee.is_none());
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+ expect_payment_failed!(nodes[0], our_payment_hash, false);
+
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 2);
+ match msg_events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+ assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ let node_0_closing_signed = match msg_events[1] {
+ MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+ let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ assert!(node_1_none.is_none());
+
+ assert!(nodes[0].node.list_channels().is_empty());
+
+ assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+ assert!(nodes[1].node.list_channels().is_empty());
+ assert!(nodes[2].node.list_channels().is_empty());
+}
+
+fn do_test_shutdown_rebroadcast(recv_count: u8) {
+ // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
+ // messages delivered prior to disconnect
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+ let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+
+ nodes[1].node.close_channel(&chan_1.2).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ if recv_count > 0 {
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ if recv_count > 1 {
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
+ let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ assert!(node_1_shutdown == node_1_2nd_shutdown);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish);
+ let node_0_2nd_shutdown = if recv_count > 0 {
+ let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
+ node_0_2nd_shutdown
+ } else {
+ let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+ assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
+ get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
+ };
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown);
+
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ assert!(nodes[2].node.claim_funds(our_payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
+ check_added_monitors!(nodes[1], 1);
+ let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+ assert!(updates_2.update_add_htlcs.is_empty());
+ assert!(updates_2.update_fail_htlcs.is_empty());
+ assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+ assert!(updates_2.update_fee.is_none());
+ assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(our_payment_preimage, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ if recv_count > 0 {
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_2nd_closing_signed.is_some());
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ if recv_count == 0 {
+ // If all closing_signeds weren't delivered we can just resume where we left off...
+ let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish);
+ let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(node_0_msgs.len(), 2);
+ let node_0_2nd_closing_signed = match node_0_msgs[1] {
+ MessageSendEvent::SendClosingSigned { ref msg, .. } => {
+ assert_eq!(node_0_closing_signed, *msg);
+ msg.clone()
+ },
+ _ => panic!(),
+ };
+
+ let node_0_3rd_shutdown = match node_0_msgs[0] {
+ MessageSendEvent::SendShutdown { ref msg, .. } => {
+ assert_eq!(node_0_2nd_shutdown, *msg);
+ msg.clone()
+ },
+ _ => panic!(),
+ };
+ assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish);
+ let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
+
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown);
+
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed);
+ let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+ let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ assert!(node_1_none.is_none());
+ } else {
+ // If one node, however, received and responded with an identical closing_signed, we
+ // end up erroring and nodes[0] will try to broadcast its own latest commitment
+ // transaction. There isn't anything simple we can do better here, but in the future
+ // we might explore storing a set of recently-closed channels that got disconnected
+ // during closing_signed and avoid broadcasting local commitment txn for some timeout,
+ // giving our counterparty enough time to (potentially) broadcast a cooperative
+ // closing transaction.
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish);
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
+ match action {
+ &ErrorAction::SendErrorMessage { ref msg } => {
+ nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &msg);
+ assert_eq!(msg.channel_id, chan_1.2);
+ },
+ _ => panic!("Unexpected event!"),
+ }
+ } else { panic!("Needed SendErrorMessage close"); }
+
+ // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
+ // checks it, but in this case nodes[1] didn't ever get a chance to receive a
+ // closing_signed so we do it ourselves
+ check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+
+ assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+ assert!(nodes[1].node.list_channels().is_empty());
+ assert!(nodes[2].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_shutdown_rebroadcast() {
+ do_test_shutdown_rebroadcast(0);
+ do_test_shutdown_rebroadcast(1);
+ do_test_shutdown_rebroadcast(2);
+}
+
+#[test]
+fn test_upfront_shutdown_script() {
+ // BOLT 2: option_upfront_shutdown_script. If a peer commits to its closing script at
+ // channel opening, enforce that script when handling its shutdown message.
+
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // We test that if a peer commits upfront to a script and provides a different one at closing, we refuse to sign
+ let flags = InitFeatures::known();
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
+ // Test that we enforce the upfront shutdown_scriptpubkey: providing a different one at closing makes us disconnect the peer
+ nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
+ check_added_monitors!(nodes[2], 1);
+
+ // We test that if a peer commits upfront to a script and it is unchanged at closing, we sign
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+
+ // We test that if a peer does not signal option_upfront_shutdown_script, we don't enforce a committed script at channel opening
+ let flags_no = InitFeatures::known().clear_upfront_shutdown_script();
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
+ nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+ check_added_monitors!(nodes[1], 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+
+ // We test that if the user opts out, we provide a zero-length script at channel opening and
+ // are still able to close the channel smoothly; here the opt-out comes from the channel initiator
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+
+ // Same as above, but with the opt-out coming from the channel acceptor rather than
+ // the initiator
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_unsupported_anysegwit_upfront_shutdown_script() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Use a non-v0 segwit script, which is only acceptable when the peer has negotiated option_shutdown_anysegwit (cleared here)
+ let node_features = InitFeatures::known().clear_shutdown_anysegwit();
+ let anysegwit_shutdown_script = Builder::new()
+ .push_int(16)
+ .push_slice(&[0, 40])
+ .into_script();
+
+ // Check script when handling an open_channel message
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+ let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ open_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), node_features.clone(), &open_channel);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)");
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ // Check script when handling an accept_channel message
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+ let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+ let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ accept_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), node_features, &accept_channel);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)");
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_invalid_upfront_shutdown_script() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+
+ // Use a segwit v0 script with an unsupported witness program
+ let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(0)
+ .push_slice(&[0, 0])
+ .into_script());
+ nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_0 OP_PUSHBYTES_2 0000)");
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_segwit_v0_shutdown_script() {
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ // Use a segwit v0 script supported even without option_shutdown_anysegwit
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
+ .push_slice(&[0; 20])
+ .into_script();
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_anysegwit_shutdown_script() {
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ // Use a non-v0 segwit script supported by option_shutdown_anysegwit
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
+ .push_slice(&[0, 0])
+ .into_script();
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_unsupported_anysegwit_shutdown_script() {
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ node_cfgs[0].features = InitFeatures::known().clear_shutdown_anysegwit();
+ node_cfgs[1].features = InitFeatures::known().clear_shutdown_anysegwit();
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Check that using an unsupported shutdown script fails and a supported one succeeds.
+ let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey();
+ let unsupported_shutdown_script =
+ ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
+ chanmon_cfgs[1].keys_manager
+ .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() })
+ .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, node_cfgs[0].features.clone(), node_cfgs[1].features.clone());
+ match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()) {
+ Err(APIError::IncompatibleShutdownScript { script }) => {
+ assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner());
+ },
+ Err(e) => panic!("Unexpected error: {:?}", e),
+ Ok(_) => panic!("Expected error"),
+ }
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = unsupported_shutdown_script.into_inner();
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_cfgs[1].features, &node_0_shutdown);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[1] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020028) from remote peer".to_owned());
+ },
+ _ => panic!("Unexpected event"),
+ }
+ check_added_monitors!(nodes[0], 1);
+}
+
+#[test]
+fn test_invalid_shutdown_script() {
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ // Use a segwit v0 script with an unsupported witness program
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
+ .push_slice(&[0, 0])
+ .into_script();
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[1] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ check_added_monitors!(nodes[0], 1);
+}
+
+#[derive(PartialEq)]
+enum TimeoutStep {
+ AfterShutdown,
+ AfterClosingSigned,
+ NoTimeout,
+}
+
+fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
+ // The range-based closing_signed negotiation allows the funder to restart the process with a
+ // new range if the two sides' previous ranges did not overlap. This lets implementations
+ // request user intervention to enter a new fee range. We do not implement the sending side
+ // of this, instead opting to let users set an explicit "willing to pay up to X to avoid
+ // force-closing" value and relying on that instead.
+ //
+ // Here we test the fundee side of that restart mechanism, implementing the funder side of
+ // it manually.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+
+ send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
+
+ nodes[0].node.close_channel(&chan_id).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
+ {
+ // Now we set nodes[1] to require a relatively high feerate for closing. This should result
+ // in it rejecting nodes[0]'s initial closing_signed, giving nodes[0] a chance to try
+ // again.
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock *= 10;
+ }
+
+ let mut node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ // nodes[0] should use a "reasonable" feerate, well under the 10 sat/vByte that nodes[1] thinks
+ // is the current prevailing feerate.
+ assert!(node_0_closing_signed.fee_satoshis <= 500);
+
+ if timeout_step != TimeoutStep::AfterShutdown {
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ // At this point nodes[1] should send back a warning message indicating it disagrees with the
+ // given channel-closing fee. Currently we do not implement warning messages so instead we
+ // remain silent here.
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now deliver a mutated closing_signed indicating a higher acceptable fee range, which
+ // nodes[1] should happily accept and respond to.
+ node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10;
+ {
+ let mut lock;
+ get_channel_ref!(nodes[0], lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10;
+ }
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ if timeout_step == TimeoutStep::NoTimeout {
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap());
+ }
+ }
+
+ if timeout_step != TimeoutStep::NoTimeout {
+ assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ } else {
+ assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ }
+
+ nodes[1].node.timer_tick_occurred();
+ nodes[1].node.timer_tick_occurred();
+
+ let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(txn.len(), 1);
+ assert_eq!(txn[0].output.len(), 2);
+
+ if timeout_step != TimeoutStep::NoTimeout {
+ assert!((txn[0].output[0].script_pubkey.is_v0_p2wpkh() &&
+ txn[0].output[1].script_pubkey.is_v0_p2wsh()) ||
+ (txn[0].output[1].script_pubkey.is_v0_p2wpkh() &&
+ txn[0].output[0].script_pubkey.is_v0_p2wsh()));
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 1);
+ } else {
+ assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh());
+ assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh());
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+#[test]
+fn test_closing_signed_reinit_timeout() {
+ do_test_closing_signed_reinit_timeout(TimeoutStep::AfterShutdown);
+ do_test_closing_signed_reinit_timeout(TimeoutStep::AfterClosingSigned);
+ do_test_closing_signed_reinit_timeout(TimeoutStep::NoTimeout);
+}
+
+fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
+ // A simple test of the legacy shutdown fee negotiation logic.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+ if high_initiator_fee {
+ // If high_initiator_fee is set, set nodes[0]'s feerate significantly higher. This
+ // shouldn't impact the flow at all given nodes[1] will happily accept the higher fee.
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock *= 10;
+ }
+
+ nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
+ let mut node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ node_0_closing_signed.fee_range = None;
+ if high_initiator_fee {
+ assert!(node_0_closing_signed.fee_satoshis > 500);
+ } else {
+ assert!(node_0_closing_signed.fee_satoshis < 500);
+ }
+
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let (_, mut node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ node_1_closing_signed.as_mut().unwrap().fee_range = None;
+
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+}
+
+#[test]
+fn simple_legacy_shutdown_test() {
+ do_simple_legacy_shutdown_test(false);
+ do_simple_legacy_shutdown_test(true);
+}
+
+#[test]
+fn simple_target_feerate_shutdown() {
+ // Simple test of target in `close_channel_with_target_feerate`.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id();
+
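+ // Feerates are in sat/1000-weight; 253 sat/kW is the test fee estimator's default
+ // (~1 sat/vByte), so these targets are roughly 10x and 5x that baseline.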
+ nodes[0].node.close_channel_with_target_feerate(&chan_id, 253 * 10).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.close_channel_with_target_feerate(&chan_id, 253 * 5).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let (_, node_1_closing_signed_opt) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ let node_1_closing_signed = node_1_closing_signed_opt.unwrap();
+
+ // nodes[1] was passed a target which was larger than the current channel feerate, which it
+ // should ignore in favor of the channel fee, as there is no use demanding a minimum higher
+ // than what will be paid on a force-close transaction. Note that we have to consider rounding,
+ // so only check that we're within 10 sats.
+ assert!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis >=
+ node_1_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis * 10 - 5);
+ assert!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis <=
+ node_1_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis * 10 + 5);
+
+ // Further, because nodes[0]'s target fee is larger than the `Normal` fee estimation plus our
+ // force-closure-avoidance buffer, min should equal max, and the fee nodes[1] selects should
+ // be nodes[0]'s only available fee.
+ assert_eq!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis,
+ node_0_closing_signed.fee_range.as_ref().unwrap().max_fee_satoshis);
+ assert_eq!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis,
+ node_0_closing_signed.fee_satoshis);
+ assert_eq!(node_0_closing_signed.fee_satoshis, node_1_closing_signed.fee_satoshis);
+
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+}
/// This cannot be changed after a channel has been initialized.
///
/// Default value: true.
- pub commit_upfront_shutdown_pubkey: bool
+ pub commit_upfront_shutdown_pubkey: bool,
+ /// Limit our total exposure to in-flight HTLCs which are burned to fees as they are too
+ /// small to claim on-chain.
+ ///
+ /// When an HTLC present in one of our channels is below a "dust" threshold, the HTLC will
+ /// not be claimable on-chain, instead being turned into additional miner fees if either
+ /// party force-closes the channel. Because the threshold is per-HTLC, our total exposure
+ /// to such payments may be substantial if there are many dust HTLCs present when the
+ /// channel is force-closed.
+ ///
+ /// This limit is applied for sent, forwarded, and received HTLCs and limits the total
+ /// exposure across all three types per-channel. Setting this too low may prevent the
+ /// sending or receipt of low-value HTLCs on high-traffic nodes, and this limit is very
+ /// important to prevent stealing of dust HTLCs by miners.
+ ///
+ /// Default value: 5_000_000 msat.
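+ ///
+ /// A minimal sketch of tightening this limit before opening channels (the value here
+ /// is illustrative, not a recommendation):
+ ///
+ /// ```
+ /// use lightning::util::config::UserConfig;
+ ///
+ /// let mut config = UserConfig::default();
+ /// config.channel_options.max_dust_htlc_exposure_msat = 1_000_000;
+ /// ```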
+ pub max_dust_htlc_exposure_msat: u64,
+ /// The additional fee we're willing to pay to avoid waiting for the counterparty's
+ /// `to_self_delay` to reclaim funds.
+ ///
+ /// When we close a channel cooperatively with our counterparty, we negotiate a fee for the
+ /// closing transaction which both sides find acceptable, ultimately paid by the channel
+ /// funder/initiator.
+ ///
+ /// When we are the funder, because we have to pay the channel closing fee, we bound the
+ /// acceptable fee by our [`Background`] and [`Normal`] fees, with the upper bound increased by
+ /// this value. Because the on-chain fee we'd pay to force-close the channel is kept near our
+ /// [`Normal`] feerate during normal operation, this value represents the additional fee we're
+ /// willing to pay in order to avoid waiting for our counterparty's to_self_delay to reclaim our
+ /// funds.
+ ///
+ /// When we are not the funder, we require the closing transaction fee to be at least our
+ /// [`Background`] fee estimate, but allow our counterparty to pay as much fee as they like.
+ /// Thus, this value is ignored when we are not the funder.
+ ///
+ /// Default value: 1000 satoshis.
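+ ///
+ /// For example, to be willing to pay up to 50,000 satoshis beyond the estimated closing
+ /// fee to avoid a force-close (an illustrative value, not a recommendation):
+ ///
+ /// ```
+ /// use lightning::util::config::UserConfig;
+ ///
+ /// let mut config = UserConfig::default();
+ /// config.channel_options.force_close_avoidance_max_fee_satoshis = 50_000;
+ /// ```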
+ ///
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ pub force_close_avoidance_max_fee_satoshis: u64,
}
impl Default for ChannelConfig {
cltv_expiry_delta: 6 * 12, // 6 blocks/hour * 12 hours
announced_channel: false,
commit_upfront_shutdown_pubkey: true,
+ max_dust_htlc_exposure_msat: 5_000_000,
+ force_close_avoidance_max_fee_satoshis: 1000,
}
}
}
impl_writeable_tlv_based!(ChannelConfig, {
(0, forwarding_fee_proportional_millionths, required),
+ (1, max_dust_htlc_exposure_msat, (default_value, 5_000_000)),
(2, cltv_expiry_delta, required),
+ (3, force_close_avoidance_max_fee_satoshis, (default_value, 1000)),
(4, announced_channel, required),
(6, commit_upfront_shutdown_pubkey, required),
(8, forwarding_fee_base_msat, required),
//! Error types live here.
+use ln::script::ShutdownScript;
+
use alloc::string::String;
use core::fmt;
/// An attempt to call watch/update_channel returned an Err (ie you did this!), causing the
/// attempted action to fail.
MonitorUpdateFailed,
+ /// [`KeysInterface::get_shutdown_scriptpubkey`] returned a shutdown scriptpubkey incompatible
+ /// with the channel counterparty as negotiated in [`InitFeatures`].
+ ///
+ /// Using a SegWit v0 script should resolve this issue. If you cannot, you won't be able to open
+ /// a channel or cooperatively close one with this peer (and will have to force-close instead).
+ ///
+ /// [`KeysInterface::get_shutdown_scriptpubkey`]: crate::chain::keysinterface::KeysInterface::get_shutdown_scriptpubkey
+ /// [`InitFeatures`]: crate::ln::features::InitFeatures
+ IncompatibleShutdownScript {
+ /// The incompatible shutdown script.
+ script: ShutdownScript,
+ },
}
impl fmt::Debug for APIError {
APIError::RouteError {ref err} => f.write_str(err),
APIError::ChannelUnavailable {ref err} => f.write_str(err),
APIError::MonitorUpdateFailed => f.write_str("Client indicated a channel monitor update failed"),
+ APIError::IncompatibleShutdownScript { ref script } => {
+ write!(f, "Provided a scriptpubkey format not accepted by peer: {}", script)
+ },
}
}
}
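
Downstream code that opens or cooperatively closes channels should be prepared for the new error variant. A hedged sketch of such handling, where `open_result` stands in for the `Result<(), APIError>` returned by, e.g., `ChannelManager::create_channel`:

```rust
use lightning::util::errors::APIError;

fn handle_open_result(open_result: Result<(), APIError>) {
	match open_result {
		Err(APIError::IncompatibleShutdownScript { script }) => {
			// The signer produced a shutdown script the peer's negotiated
			// InitFeatures cannot accept; a SegWit v0 script avoids this.
			eprintln!("incompatible shutdown script: {}", script);
		},
		Err(e) => eprintln!("channel open failed: {:?}", e),
		Ok(()) => {},
	}
}
```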
#[macro_export]
macro_rules! log_internal {
($logger: expr, $lvl:expr, $($arg:tt)+) => (
- $logger.log(&$crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!()));
+ $logger.log(&$crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!()))
);
}
fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError>;
}
+impl<T: Readable> MaybeReadable for T {
+ #[inline]
+ fn read<R: Read>(reader: &mut R) -> Result<Option<T>, DecodeError> {
+ Ok(Some(Readable::read(reader)?))
+ }
+}
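
The blanket impl above means every `Readable` type is trivially `MaybeReadable`, while hand-written impls can opt out of failing on unknown data. A sketch with a hypothetical type, using the crate-internal paths seen elsewhere in this diff:

```rust
use io::Read;
use ln::msgs::DecodeError;
use util::ser::{MaybeReadable, Readable};

// Returning Ok(None) consumes the entry's bytes but yields no value, letting
// containers such as VecReadWrapper skip records they do not understand.
struct OddSkipper(u8);

impl MaybeReadable for OddSkipper {
	fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		let id: u8 = Readable::read(reader)?;
		if id % 2 == 1 {
			Ok(None) // odd ids are treated as safely ignorable
		} else {
			Ok(Some(OddSkipper(id)))
		}
	}
}
```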
+
pub(crate) struct OptionDeserWrapper<T: Readable>(pub Option<T>);
impl<T: Readable> Readable for OptionDeserWrapper<T> {
#[inline]
}
/// Wrapper to read elements from a given stream until it reaches the end of the stream.
-pub(crate) struct VecReadWrapper<T: Readable>(pub Vec<T>);
-impl<T: Readable> Readable for VecReadWrapper<T> {
+pub(crate) struct VecReadWrapper<T>(pub Vec<T>);
+impl<T: MaybeReadable> Readable for VecReadWrapper<T> {
#[inline]
fn read<R: Read>(mut reader: &mut R) -> Result<Self, DecodeError> {
let mut values = Vec::new();
loop {
let mut track_read = ReadTrackingReader::new(&mut reader);
- match Readable::read(&mut track_read) {
- Ok(v) => { values.push(v); },
+ match MaybeReadable::read(&mut track_read) {
+ Ok(Some(v)) => { values.push(v); },
+ Ok(None) => { },
// If we failed to read any bytes at all, we reached the end of our TLV
// stream and have simply exhausted all entries.
Err(ref e) if e == &DecodeError::ShortRead && !track_read.have_read => break,
return Err(DecodeError::BadLengthDescriptor);
}
let mut ret = Vec::with_capacity(len as usize);
- for _ in 0..len { ret.push(Signature::read(r)?); }
+ for _ in 0..len { ret.push(Readable::read(r)?); }
Ok(ret)
}
}
impl<T: Readable> Readable for Option<T>
{
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
- match BigSize::read(r)?.0 {
+ let len: BigSize = Readable::read(r)?;
+ match len.0 {
0 => Ok(None),
len => {
let mut reader = FixedLengthReader::new(r, len - 1);
// licenses.
macro_rules! encode_tlv {
+ ($stream: expr, $type: expr, $field: expr, (default_value, $default: expr)) => {
+ encode_tlv!($stream, $type, $field, required)
+ };
($stream: expr, $type: expr, $field: expr, required) => {
BigSize($type).write($stream)?;
BigSize($field.serialized_length() as u64).write($stream)?;
}
macro_rules! encode_tlv_stream {
- ($stream: expr, {$(($type: expr, $field: expr, $fieldty: ident)),*}) => { {
+ ($stream: expr, {$(($type: expr, $field: expr, $fieldty: tt)),* $(,)*}) => { {
#[allow(unused_imports)]
use {
ln::msgs::DecodeError,
}
macro_rules! get_varint_length_prefixed_tlv_length {
+ ($len: expr, $type: expr, $field: expr, (default_value, $default: expr)) => {
+ get_varint_length_prefixed_tlv_length!($len, $type, $field, required)
+ };
($len: expr, $type: expr, $field: expr, required) => {
BigSize($type).write(&mut $len).expect("No in-memory data may fail to serialize");
let field_len = $field.serialized_length();
}
macro_rules! encode_varint_length_prefixed_tlv {
- ($stream: expr, {$(($type: expr, $field: expr, $fieldty: ident)),*}) => { {
+ ($stream: expr, {$(($type: expr, $field: expr, $fieldty: tt)),*}) => { {
use util::ser::BigSize;
let len = {
#[allow(unused_mut)]
}
macro_rules! check_tlv_order {
- ($last_seen_type: expr, $typ: expr, $type: expr, required) => {{
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (default_value, $default: expr)) => {{
+ #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
+ let invalid_order = ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type;
+ if invalid_order {
+ $field = $default;
+ }
+ }};
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, required) => {{
#[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
let invalid_order = ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type;
if invalid_order {
return Err(DecodeError::InvalidValue);
}
}};
- ($last_seen_type: expr, $typ: expr, $type: expr, option) => {{
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, option) => {{
// no-op
}};
- ($last_seen_type: expr, $typ: expr, $type: expr, vec_type) => {{
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, vec_type) => {{
+ // no-op
+ }};
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, ignorable) => {{
// no-op
}};
}
macro_rules! check_missing_tlv {
- ($last_seen_type: expr, $type: expr, required) => {{
+ ($last_seen_type: expr, $type: expr, $field: ident, (default_value, $default: expr)) => {{
+ #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
+ let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type;
+ if missing_req_type {
+ $field = $default;
+ }
+ }};
+ ($last_seen_type: expr, $type: expr, $field: ident, required) => {{
#[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type;
if missing_req_type {
return Err(DecodeError::InvalidValue);
}
}};
- ($last_seen_type: expr, $type: expr, vec_type) => {{
+ ($last_seen_type: expr, $type: expr, $field: ident, vec_type) => {{
+ // no-op
+ }};
+ ($last_seen_type: expr, $type: expr, $field: ident, option) => {{
// no-op
}};
- ($last_seen_type: expr, $type: expr, option) => {{
+ ($last_seen_type: expr, $type: expr, $field: ident, ignorable) => {{
// no-op
}};
}
macro_rules! decode_tlv {
+ ($reader: expr, $field: ident, (default_value, $default: expr)) => {{
+ decode_tlv!($reader, $field, required)
+ }};
($reader: expr, $field: ident, required) => {{
$field = ser::Readable::read(&mut $reader)?;
}};
($reader: expr, $field: ident, option) => {{
$field = Some(ser::Readable::read(&mut $reader)?);
}};
+ ($reader: expr, $field: ident, ignorable) => {{
+ $field = ser::MaybeReadable::read(&mut $reader)?;
+ }};
}
macro_rules! decode_tlv_stream {
- ($stream: expr, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => { {
+ ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { {
use ln::msgs::DecodeError;
let mut last_seen_type: Option<u64> = None;
+ let mut stream_ref = $stream;
'tlv_read: loop {
use util::ser;
// determine whether we should break or return ShortRead if we get an
 			// UnexpectedEof. This should in every case be largely cosmetic, but it's nice to
 			// pass the TLV test vectors exactly, which require this distinction.
- let mut tracking_reader = ser::ReadTrackingReader::new($stream);
+ let mut tracking_reader = ser::ReadTrackingReader::new(&mut stream_ref);
match ser::Readable::read(&mut tracking_reader) {
Err(DecodeError::ShortRead) => {
if !tracking_reader.have_read {
}
// As we read types, make sure we hit every required type:
$({
- check_tlv_order!(last_seen_type, typ, $type, $fieldty);
+ check_tlv_order!(last_seen_type, typ, $type, $field, $fieldty);
})*
last_seen_type = Some(typ.0);
// Finally, read the length and value itself:
- let length: ser::BigSize = ser::Readable::read($stream)?;
- let mut s = ser::FixedLengthReader::new($stream, length.0);
+ let length: ser::BigSize = ser::Readable::read(&mut stream_ref)?;
+ let mut s = ser::FixedLengthReader::new(&mut stream_ref, length.0);
match typ.0 {
$($type => {
decode_tlv!(s, $field, $fieldty);
}
// Make sure we got to each required type after we've read every TLV:
$({
- check_missing_tlv!(last_seen_type, $type, $fieldty);
+ check_missing_tlv!(last_seen_type, $type, $field, $fieldty);
})*
} }
}
/// This is the preferred method of adding new fields that old nodes can ignore and still function
/// correctly.
macro_rules! write_tlv_fields {
- ($stream: expr, {$(($type: expr, $field: expr, $fieldty: ident)),* $(,)*}) => {
+ ($stream: expr, {$(($type: expr, $field: expr, $fieldty: tt)),* $(,)*}) => {
encode_varint_length_prefixed_tlv!($stream, {$(($type, $field, $fieldty)),*});
}
}
/// Reads a suffix added by write_tlv_fields.
macro_rules! read_tlv_fields {
- ($stream: expr, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => { {
- let tlv_len = ::util::ser::BigSize::read($stream)?;
+ ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { {
+ let tlv_len: ::util::ser::BigSize = ::util::ser::Readable::read($stream)?;
let mut rd = ::util::ser::FixedLengthReader::new($stream, tlv_len.0);
decode_tlv_stream!(&mut rd, {$(($type, $field, $fieldty)),*});
rd.eat_remaining().map_err(|_| ::ln::msgs::DecodeError::ShortRead)?;
}
macro_rules! init_tlv_based_struct_field {
+ ($field: ident, (default_value, $default: expr)) => {
+ $field
+ };
($field: ident, option) => {
$field
};
}
macro_rules! init_tlv_field_var {
+ ($field: ident, (default_value, $default: expr)) => {
+ let mut $field = $default;
+ };
($field: ident, required) => {
let mut $field = ::util::ser::OptionDeserWrapper(None);
};
};
($field: ident, option) => {
let mut $field = None;
- }
+ };
}
/// Implements Readable/Writeable for a struct storing it as a set of TLVs
 /// If `$fieldty` is `vec_type`, then `$field` is a `Vec`, which needs to have its individual
 /// elements serialized.
macro_rules! impl_writeable_tlv_based {
- ($st: ident, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => {
+ ($st: ident, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => {
impl ::util::ser::Writeable for $st {
fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), $crate::io::Error> {
write_tlv_fields!(writer, {
}
}
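
The new `(default_value, ...)` field type is what lets `ChannelConfig` (above) grow fields without breaking reads of previously-written data. A hedged sketch on a hypothetical struct:

```rust
// Serializes like `required`, but when reading data written before the field
// existed, the supplied default is used instead of DecodeError::InvalidValue.
struct ExampleLimits {
	base_fee_msat: u32,
	max_exposure_msat: u64,
}

impl_writeable_tlv_based!(ExampleLimits, {
	(0, base_fee_msat, required),
	(1, max_exposure_msat, (default_value, 5_000_000)),
});
```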
-/// Implement Readable and Writeable for an enum, with struct variants stored as TLVs and tuple
-/// variants stored directly.
-/// The format is, for example
-/// impl_writeable_tlv_based_enum!(EnumName,
-/// (0, StructVariantA) => {(0, required_variant_field, required), (1, optional_variant_field, option)},
-/// (1, StructVariantB) => {(0, variant_field_a, required), (1, variant_field_b, required), (2, variant_vec_field, vec_type)};
-/// (2, TupleVariantA), (3, TupleVariantB),
-/// );
-/// The type is written as a single byte, followed by any variant data.
-/// Attempts to read an unknown type byte result in DecodeError::UnknownRequiredFeature.
-macro_rules! impl_writeable_tlv_based_enum {
+macro_rules! _impl_writeable_tlv_based_enum_common {
($st: ident, $(($variant_id: expr, $variant_name: ident) =>
- {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}
+ {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
),* $(,)*;
$(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*) => {
impl ::util::ser::Writeable for $st {
Ok(())
}
}
+ }
+}
+
+/// Implement MaybeReadable and Writeable for an enum, with struct variants stored as TLVs and
+/// tuple variants stored directly.
+///
+/// This is largely identical to `impl_writeable_tlv_based_enum`, except that odd variants will
+/// return `Ok(None)` instead of `Err(UnknownRequiredFeature)`. It should generally be preferred
+/// when `MaybeReadable` is practical instead of just `Readable` as it provides an upgrade path for
+/// new variants to be added which are simply ignored by existing clients.
+macro_rules! impl_writeable_tlv_based_enum_upgradable {
+ ($st: ident, $(($variant_id: expr, $variant_name: ident) =>
+ {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
+ ),* $(,)*) => {
+ _impl_writeable_tlv_based_enum_common!($st,
+ $(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*; );
+
+ impl ::util::ser::MaybeReadable for $st {
+ fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Option<Self>, ::ln::msgs::DecodeError> {
+ let id: u8 = ::util::ser::Readable::read(reader)?;
+ match id {
+ $($variant_id => {
+ // Because read_tlv_fields creates a labeled loop, we cannot call it twice
+ // in the same function body. Instead, we define a closure and call it.
+ let f = || {
+ $(
+ init_tlv_field_var!($field, $fieldty);
+ )*
+ read_tlv_fields!(reader, {
+ $(($type, $field, $fieldty)),*
+ });
+ Ok(Some($st::$variant_name {
+ $(
+ $field: init_tlv_based_struct_field!($field, $fieldty)
+ ),*
+ }))
+ };
+ f()
+ }),*
+ _ if id % 2 == 1 => Ok(None),
+ _ => Err(DecodeError::UnknownRequiredFeature),
+ }
+ }
+ }
+
+ }
+}
+
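A hedged usage sketch for the new macro, on a hypothetical enum:

```rust
// A reader built before an odd-numbered variant existed decodes that variant
// as Ok(None) and can skip it, rather than failing the whole deserialization
// with DecodeError::UnknownRequiredFeature.
enum ExampleUpdate {
	BalanceChanged { new_balance_msat: u64 },
}

impl_writeable_tlv_based_enum_upgradable!(ExampleUpdate,
	(0, BalanceChanged) => { (0, new_balance_msat, required) },
);
```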
+/// Implement Readable and Writeable for an enum, with struct variants stored as TLVs and tuple
+/// variants stored directly.
+/// The format is, for example
+/// impl_writeable_tlv_based_enum!(EnumName,
+/// (0, StructVariantA) => {(0, required_variant_field, required), (1, optional_variant_field, option)},
+/// (1, StructVariantB) => {(0, variant_field_a, required), (1, variant_field_b, required), (2, variant_vec_field, vec_type)};
+/// (2, TupleVariantA), (3, TupleVariantB),
+/// );
+/// The type is written as a single byte, followed by any variant data.
+/// Attempts to read an unknown type byte result in DecodeError::UnknownRequiredFeature.
+macro_rules! impl_writeable_tlv_based_enum {
+ ($st: ident, $(($variant_id: expr, $variant_name: ident) =>
+ {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
+ ),* $(,)*;
+ $(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*) => {
+ _impl_writeable_tlv_based_enum_common!($st,
+ $(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*;
+ $(($tuple_variant_id, $tuple_variant_name)),*);
impl ::util::ser::Readable for $st {
fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
use ln::features::{ChannelFeatures, InitFeatures};
use ln::msgs;
use ln::msgs::OptionalField;
+use ln::script::ShutdownScript;
use util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER};
use util::events;
use util::logger::{Logger, Level, Record};
fn get_node_secret(&self) -> SecretKey { unreachable!(); }
fn get_destination_script(&self) -> Script { unreachable!(); }
- fn get_shutdown_pubkey(&self) -> PublicKey { unreachable!(); }
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { unreachable!(); }
fn get_channel_signer(&self, _inbound: bool, _channel_value_satoshis: u64) -> EnforcingSigner { unreachable!(); }
fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
pub override_channel_id_priv: Mutex<Option<[u8; 32]>>,
pub disable_revocation_policy_check: bool,
revoked_commitments: Mutex<HashMap<[u8;32], Arc<Mutex<u64>>>>,
+ expectations: Mutex<Option<VecDeque<OnGetShutdownScriptpubkey>>>,
}
impl keysinterface::KeysInterface for TestKeysInterface {
fn get_node_secret(&self) -> SecretKey { self.backing.get_node_secret() }
fn get_destination_script(&self) -> Script { self.backing.get_destination_script() }
- fn get_shutdown_pubkey(&self) -> PublicKey { self.backing.get_shutdown_pubkey() }
+
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+ match &mut *self.expectations.lock().unwrap() {
+ None => self.backing.get_shutdown_scriptpubkey(),
+ Some(expectations) => match expectations.pop_front() {
+ None => panic!("Unexpected get_shutdown_scriptpubkey"),
+ Some(expectation) => expectation.returns,
+ },
+ }
+ }
+
fn get_channel_signer(&self, inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner {
let keys = self.backing.get_channel_signer(inbound, channel_value_satoshis);
let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed);
}
}
-
impl TestKeysInterface {
pub fn new(seed: &[u8; 32], network: Network) -> Self {
let now = Duration::from_secs(genesis_block(network).header.time as u64);
override_channel_id_priv: Mutex::new(None),
disable_revocation_policy_check: false,
revoked_commitments: Mutex::new(HashMap::new()),
+ expectations: Mutex::new(None),
}
}
+
+ /// Sets an expectation that [`keysinterface::KeysInterface::get_shutdown_scriptpubkey`] is
+ /// called.
+ pub fn expect(&self, expectation: OnGetShutdownScriptpubkey) -> &Self {
+ self.expectations.lock().unwrap()
+ .get_or_insert_with(|| VecDeque::new())
+ .push_back(expectation);
+ self
+ }
+
pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> EnforcingSigner {
let keys = self.backing.derive_channel_keys(channel_value_satoshis, id);
let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed);
}
}
+impl Drop for TestKeysInterface {
+ fn drop(&mut self) {
+ if std::thread::panicking() {
+ return;
+ }
+
+ if let Some(expectations) = &*self.expectations.lock().unwrap() {
+ if !expectations.is_empty() {
+ panic!("Unsatisfied expectations: {:?}", expectations);
+ }
+ }
+ }
+}
+
+/// An expectation that [`keysinterface::KeysInterface::get_shutdown_scriptpubkey`] was called and
+/// returns a [`ShutdownScript`].
+pub struct OnGetShutdownScriptpubkey {
+ /// A shutdown script used to close a channel.
+ pub returns: ShutdownScript,
+}
+
+impl core::fmt::Debug for OnGetShutdownScriptpubkey {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ f.debug_struct("OnGetShutdownScriptpubkey").finish()
+ }
+}
+
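A hedged sketch of the expectation machinery in a functional test, assuming `pubkey` is a valid secp256k1 `PublicKey` and `ShutdownScript::new_p2wpkh_from_pubkey` is the constructor from `ln::script`:

```rust
let keys = TestKeysInterface::new(&[0u8; 32], Network::Testnet);
keys.expect(OnGetShutdownScriptpubkey {
	returns: ShutdownScript::new_p2wpkh_from_pubkey(pubkey),
});
// The next get_shutdown_scriptpubkey call returns the expected script; if the
// expectation goes unconsumed, TestKeysInterface's Drop impl panics with
// "Unsatisfied expectations".
```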
pub struct TestChainSource {
pub genesis_hash: BlockHash,
pub utxo_ret: Mutex<Result<TxOut, chain::AccessError>>,