// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that test standing up a network of ChannelManagers, creating channels, sending
//! payments/messages between them, and often checking the resulting ChannelMonitors are able to
//! claim outputs on-chain.

use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils::{self, WatchtowerPersister};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::Network;
use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;
use bitcoin::transaction::Version;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey,SecretKey};

use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;

#[test]
fn test_channel_resumption_fail_post_funding() {
	// If we fail to exchange funding with a peer prior to it disconnecting we'll resume the
	// channel open on reconnect, however if we do exchange funding we do not currently support
	// replaying it, so here we test that the channel closes.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap();
	let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan);
	let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan);

	let (temp_chan_id, tx, funding_output) =
		create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
	let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output);
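	// From here on the channel is tracked under the ID derived from the funding outpoint rather
	// than the temporary ID, so the close event below is keyed on `new_chan_id`.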
	nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx).unwrap();

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]);

	// After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that
	// explicitly here.
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
}

#[test]
fn test_insane_channel_opens() {
	// Stand up a network of 2 nodes
	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
	let mut cfg = UserConfig::default();
	cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Instantiate channel parameters where we push the maximum msats given our
	// funding satoshis
	let channel_value_sat = 31337; // same as funding satoshis
	let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
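	// The maximum push is the channel value minus the channel reserve; the push_msat mutation in
	// the helper below checks that one msat more than this is rejected.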

	// Have node0 initiate a channel to node1 with aforementioned parameters
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();

	// Extract the channel open message from node0 to node1
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Test helper that asserts we get the correct error string given a mutator
	// that supposedly makes the channel open message insane
	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
			match action {
				&ErrorAction::SendErrorMessage { .. } => {
					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
				},
				_ => panic!("unexpected event!"),
			}
		} else { assert!(false); }
	};

	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

	// Test all mutations that would make the channel open message insane
	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });

	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
	// them.
	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	*node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
		},
		_ => panic!(),
	}
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
	// but only for them. Because some LSPs do it with some level of trust of the clients (for a
	// substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
	// in normal testing, we test it explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();

	// Have node0 initiate a channel to node1 with aforementioned parameters
	let mut push_amt = 100_000_000;
	let feerate_per_kw = 253;
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
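	// push_amt is now the channel value minus the commitment-fee headroom (with a few HTLCs of
	// buffer) and the channel reserve the funder must retain, i.e. the most we can push at open.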

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
	let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	if !send_from_initiator {
		open_channel_message.channel_reserve_satoshis = 0;
		open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

	// Extract the channel accept message from node1 to node0
	let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	if send_from_initiator {
		accept_channel_message.channel_reserve_satoshis = 0;
		accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
	{
		let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
		let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
		let mut sender_node_per_peer_lock;
		let mut sender_node_peer_state_lock;

		let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
		match channel_phase {
			ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
				let chan_context = channel_phase.context_mut();
				chan_context.holder_selected_channel_reserve_satoshis = 0;
				chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
			},
			_ => assert!(false),
		}
	}

	let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
	let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
	create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

	// nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
	// security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
	if send_from_initiator {
		send_payment(&nodes[0], &[&nodes[1]], 100_000_000
			// Note that for outbound channels we have to consider the commitment tx fee and the
			// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
			// well as an additional HTLC.
			- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
	} else {
		send_payment(&nodes[1], &[&nodes[0]], push_amt);
	}
}

#[test]
fn test_counterparty_no_reserve() {
	do_test_counterparty_no_reserve(true);
	do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// A                                        B
	// update_fee                            ->
	// send (1) commitment_signed            -.
	//                                       <- update_add_htlc/commitment_signed
	// send (2) RAA (awaiting remote revoke) -.
	// (1) commitment_signed is delivered    ->
	//                                       .- send (3) RAA (awaiting remote revoke)
	// (2) RAA is delivered                  ->
	//                                       .- send (4) commitment_signed
	//                                       <- (3) RAA is delivered
	// send (5) commitment_signed            -.
	//                                       <- (4) commitment_signed is delivered
	// send (6) RAA                          -.
	// (5) commitment_signed is delivered    ->
	//                                       <- RAA
	// (6) RAA is delivered                  ->

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	// deliver(1), generate (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_update.update_add_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fee.is_none()); // (4)
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
	let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update.update_add_htlcs.is_empty()); // (5)
	assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
	assert!(as_update.update_fee.is_none()); // (5)
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// only (6) so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
	let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	check_added_monitors!(nodes[0], 1);

	let events_2 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
	// crash in an earlier version of the update_fee patch)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let update_msg = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
			update_fee.as_ref()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
	check_added_monitors!(nodes[1], 1);

	// We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// A                                        B
	// update_fee/commitment_signed          ->
	//                                       .- send (1) RAA and (2) commitment_signed
	// update_fee (never committed)          ->
	// (3) update_fee                        ->
	// We have to manually generate the above update_fee, it is allowed by the protocol but we
	// don't track which updates correspond to which revoke_and_ack responses so we're in
	// AwaitingRAA mode and will not generate the update_fee yet.
	//                                       <- (1) RAA delivered
	// (3) is generated and send (4) CS      -.
	// Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
	// know the per_commitment_point to use for it.
	//                                       <- (2) commitment_signed delivered
	// revoke_and_ack                        ->
	//                                          B should send no response here
	// (4) commitment_signed delivered       ->
	//                                       <- RAA/commitment_signed delivered
	// revoke_and_ack                        ->

	// First nodes[0] generates an update_fee
	let initial_feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		initial_feerate = *feerate_lock;
		*feerate_lock = initial_feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref().unwrap(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	// Deliver first update_fee/commitment_signed pair, generating (1) and (2):
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
	let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
	// transaction:
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = initial_feerate + 40;
	}
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Create the (3) update_fee message that nodes[0] will generate before it does...
	let mut update_msg_2 = msgs::UpdateFee {
		channel_id: update_msg_1.channel_id.clone(),
		feerate_per_kw: (initial_feerate + 30) as u32,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	// Deliver (1), generating (3) and (4)
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
	let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	assert!(as_second_update.update_add_htlcs.is_empty());
	assert!(as_second_update.update_fulfill_htlcs.is_empty());
	assert!(as_second_update.update_fail_htlcs.is_empty());
	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
	// Check that the update_fee newly generated matches what we delivered:
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

	// Deliver (2) commitment_signed
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Deliver (4)
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
	// Previously, we had issues deserializing channels when we hadn't connected the first block
	// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
	// serialization round-trips and simply do steps towards opening a channel and then drop the
	// Node objects.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
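
	// The low nibble of `steps` selects how far through the funding flow we get before the nodes
	// are dropped; the high bit additionally connects a block on both nodes first.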
	if steps & 0b1000_0000 != 0 {
		let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	if steps & 0x0f == 0 { return; }
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 1 { return; }
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 2 { return; }
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	if steps & 0x0f == 3 { return; }
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 4 { return; }
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 5 { return; }
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	if steps & 0x0f == 6 { return; }
	create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

	if steps & 0x0f == 7 { return; }
	confirm_transaction_at(&nodes[0], &tx, 2);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
	do_test_sanity_on_in_flight_opens(0);
	do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(1);
	do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(2);
	do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(3);
	do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(4);
	do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(5);
	do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(6);
	do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(7);
	do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(8);
	do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
}

#[test]
fn test_update_fee_vanilla() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 25;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_value = 5000;
	let push_sats = 700;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
	let channel_id = chan.2;
	let secp_ctx = Secp256k1::new();
	let default_config = UserConfig::default();
	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Calculate the maximum feerate that A can afford. Note that we don't send an update_fee
	// CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we
	// calculate two different feerates here - the expected local limit as well as the expected
	// remote limit.
	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
	{
		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
		assert_eq!(commitment_tx.output.len(), 2);
		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat());
		actual_fee = channel_value - actual_fee;
		assert_eq!(total_fee, actual_fee);
	}

	{
		// Increment the feerate by a small constant, accounting for rounding errors
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 4;
	}
	nodes[0].node.timer_tick_occurred();
	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
	check_added_monitors!(nodes[0], 0);

	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
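	// 2^48 - 2 (commitment transaction numbers count down from 2^48 - 1).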

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 pubkeys.funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 pubkeys.funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			INITIAL_COMMITMENT_NUMBER - 1,
			push_sats,
			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			non_buffer_feerate + 4,
			&mut htlcs,
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
	};

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	let update_fee = msgs::UpdateFee {
		channel_id: chan.2,
		feerate_per_kw: non_buffer_feerate + 4,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

	// While producing the commitment_signed response after handling a received update_fee request,
	// the check that the funder, who sent the update_fee request, can afford the new fee
	// (funder_balance >= fee + channel_reserve) should produce an error.
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
		[nodes[0].node.get_our_node_id()], channel_value);
}

#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);

	// nothing happens since node[1] is in AwaitingRemoteRevoke
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 0);
		added_monitors.clear();
	}
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// node[1] has nothing to do

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	// AwaitingRemoteRevoke ends here

	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(commitment_update.update_add_htlcs.len(), 1);
	assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fee.is_none(), true);

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[0]);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { .. } => { },
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);

	send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let channel_id = chan.2;

	// A                                        B
	// (1) update_fee/commitment_signed      ->
	//                                       <- (2) revoke_and_ack
	//                                       .- send (3) commitment_signed
	// (4) update_fee/commitment_signed      ->
	//                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
	//                                       <- (3) commitment_signed delivered
	// send (6) revoke_and_ack               -.
	//                                       <- (5) deliver revoke_and_ack
	// (6) deliver revoke_and_ack            ->
	//                                       .- send (7) commitment_signed in response to (4)
	//                                       <- (7) deliver commitment_signed
	// revoke_and_ack                        ->

	// Create and deliver (1)...
	let feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		feerate = *feerate_lock;
		*feerate_lock = feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// Generate (2) and (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Create and deliver (4)...
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate + 30;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	check_added_monitors!(nodes[1], 1);

	let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Handle (3), creating (6):
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Deliver (6), creating (7):
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(commitment_update.update_add_htlcs.is_empty());
	assert!(commitment_update.update_fulfill_htlcs.is_empty());
	assert!(commitment_update.update_fail_htlcs.is_empty());
	assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
	assert!(commitment_update.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
	assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn fake_network_test() {
	// Simple test which builds a network of ChannelManagers, connects them to each other, and
	// tests that payments get routed and transactions broadcast in semi-reasonable ways.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);

	// Rebalance the network a bit by relaying one payment through all the channels...
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);

	// Send some more payments
	send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);

	// Test failure packets
	let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);

	// Add a new channel that skips 3
	let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);

	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
	send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);

	// Do some rebalance loop payments, simultaneously
	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
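	// Fees are filled in back-to-front: each intermediate hop charges its base fee plus its
	// proportional fee on the amount forwarded by the next hop.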
	let payment_preimage_1 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;

	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
	let payment_hash_2 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;

	// Claim the rebalances...
	fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
	claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);

	// Close down the channels...
	close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
1151 fn holding_cell_htlc_counting() {
1152 // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
1153 // to ensure we don't end up with HTLCs sitting around in our holding cell for several
1154 // commitment dance rounds.
1155 let chanmon_cfgs = create_chanmon_cfgs(3);
1156 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1157 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1158 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1159 create_announced_chan_between_nodes(&nodes, 0, 1);
1160 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1162 // Fetch a route in advance, as we won't be able to fetch one once we're unable to send.
1163 let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1165 let mut payments = Vec::new();
1167 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1168 nodes[1].node.send_payment_with_route(&route, payment_hash,
1169 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1170 payments.push((payment_preimage, payment_hash));
1172 check_added_monitors!(nodes[1], 1);
1174 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1175 assert_eq!(events.len(), 1);
1176 let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1177 assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1179 // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1180 // the holding cell waiting on B's RAA to send. At this point we should not be able to add another HTLC.
1183 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
1184 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
1185 ), true, APIError::ChannelUnavailable { .. }, {});
1186 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1189 // This should also be true if we try to forward a payment.
1190 let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
1192 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1193 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1194 check_added_monitors!(nodes[0], 1);
1197 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1198 assert_eq!(events.len(), 1);
1199 let payment_event = SendEvent::from_event(events.pop().unwrap());
1200 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1202 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1203 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1204 // We have to forward pending HTLCs twice - the first pass tries to forward the payment (and
1205 // fails), and the second processes the resulting failure and fails the HTLC backwards.
1206 expect_pending_htlcs_forwardable!(nodes[1]);
1207 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1208 check_added_monitors!(nodes[1], 1);
1210 let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1211 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1212 commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1214 expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
1216 // Now forward all the pending HTLCs and claim them back
1217 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1218 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1219 check_added_monitors!(nodes[2], 1);
1221 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1222 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1223 check_added_monitors!(nodes[1], 1);
1224 let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1226 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1227 check_added_monitors!(nodes[1], 1);
1228 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1230 for ref update in as_updates.update_add_htlcs.iter() {
1231 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1233 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1234 check_added_monitors!(nodes[2], 1);
1235 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1236 check_added_monitors!(nodes[2], 1);
1237 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1239 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1240 check_added_monitors!(nodes[1], 1);
1241 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1242 check_added_monitors!(nodes[1], 1);
1243 let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1245 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1246 check_added_monitors!(nodes[2], 1);
1248 expect_pending_htlcs_forwardable!(nodes[2]);
1250 let events = nodes[2].node.get_and_clear_pending_events();
1251 assert_eq!(events.len(), payments.len());
1252 for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1254 &Event::PaymentClaimable { ref payment_hash, .. } => {
1255 assert_eq!(*payment_hash, *hash);
1257 _ => panic!("Unexpected event"),
1261 for (preimage, _) in payments.drain(..) {
1262 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1265 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1269 fn duplicate_htlc_test() {
1270 // Test that we accept duplicate payment_hash HTLCs across the network and that
1271 // claiming/failing each one is handled separately and doesn't affect the others
1272 let chanmon_cfgs = create_chanmon_cfgs(6);
1273 let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1274 let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1275 let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1277 // Create some initial channels so that 0/1/2 can route to 4/5 via node 3
1278 create_announced_chan_between_nodes(&nodes, 0, 3);
1279 create_announced_chan_between_nodes(&nodes, 1, 3);
1280 create_announced_chan_between_nodes(&nodes, 2, 3);
1281 create_announced_chan_between_nodes(&nodes, 3, 4);
1282 create_announced_chan_between_nodes(&nodes, 3, 5);
1284 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1286 *nodes[0].network_payment_count.borrow_mut() -= 1;
1287 assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1289 *nodes[0].network_payment_count.borrow_mut() -= 1;
1290 assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1292 claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1293 fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1294 claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1298 fn test_duplicate_htlc_different_direction_onchain() {
1299 // Test that ChannelMonitor doesn't generate 2 preimage txn
1300 // when we have 2 HTLCs with the same preimage that go across a node
1301 // in opposite directions, even with the same payment secret.
1302 let chanmon_cfgs = create_chanmon_cfgs(2);
1303 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1304 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1305 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1307 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1310 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1312 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1314 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
1315 let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
1316 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1318 // Provide preimage to node 0 by claiming payment
1319 nodes[0].node.claim_funds(payment_preimage);
1320 expect_payment_claimed!(nodes[0], payment_hash, 800_000);
1321 check_added_monitors!(nodes[0], 1);
1323 // Broadcast node 1 commitment txn
1324 let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1326 assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1327 let mut has_both_htlcs = 0; // check htlcs match ones committed
1328 for outp in remote_txn[0].output.iter() {
1329 if outp.value.to_sat() == 800_000 / 1000 {
1330 has_both_htlcs += 1;
1331 } else if outp.value.to_sat() == 900_000 / 1000 {
1332 has_both_htlcs += 1;
1335 assert_eq!(has_both_htlcs, 2);
1337 mine_transaction(&nodes[0], &remote_txn[0]);
1338 check_added_monitors!(nodes[0], 1);
1339 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
1340 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
1342 let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1343 assert_eq!(claim_txn.len(), 3);
1345 check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
1346 check_spends!(claim_txn[1], remote_txn[0]);
1347 check_spends!(claim_txn[2], remote_txn[0]);
1348 let preimage_tx = &claim_txn[0];
1349 let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
1350 (&claim_txn[1], &claim_txn[2])
1352 (&claim_txn[2], &claim_txn[1])
1355 assert_eq!(preimage_tx.input.len(), 1);
1356 assert_eq!(preimage_bump_tx.input.len(), 1);
1358 assert_eq!(preimage_tx.input.len(), 1);
1359 assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1360 assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), 800);
1362 assert_eq!(timeout_tx.input.len(), 1);
1363 assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1364 check_spends!(timeout_tx, remote_txn[0]);
1365 assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), 900);
1367 let events = nodes[0].node.get_and_clear_pending_msg_events();
1368 assert_eq!(events.len(), 3);
1371 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1372 MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
1373 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1374 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
1376 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1377 assert!(update_add_htlcs.is_empty());
1378 assert!(update_fail_htlcs.is_empty());
1379 assert_eq!(update_fulfill_htlcs.len(), 1);
1380 assert!(update_fail_malformed_htlcs.is_empty());
1381 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1383 _ => panic!("Unexpected event"),
1389 fn test_basic_channel_reserve() {
1390 let chanmon_cfgs = create_chanmon_cfgs(2);
1391 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1392 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1393 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1394 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1396 let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1397 let channel_reserve = chan_stat.channel_reserve_msat;
1399 // The 2* and +1 are for the fee spike reserve.
1400 let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
1401 let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
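// Spelling out the arithmetic: nodes[0] funded 100_000 sat and pushed 95_000_000 msat, so it
// holds 5_000_000 msat. max_can_send is what remains after setting aside the channel reserve
// and twice the commitment fee computed with one extra HTLC slot (the 2* and +1 fee spike
// reserve noted above), so bumping the last hop by 1 msat below should exceed what the
// channel can afford and fail.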
1402 let (mut route, our_payment_hash, _, our_payment_secret) =
1403 get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
1404 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1405 let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1406 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
1408 PaymentSendFailure::AllFailedResendSafe(ref fails) => {
1409 if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
1410 else { panic!("Unexpected error variant"); }
1412 _ => panic!("Unexpected error variant"),
1414 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1416 send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1420 fn test_fee_spike_violation_fails_htlc() {
1421 let chanmon_cfgs = create_chanmon_cfgs(2);
1422 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1423 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1424 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1425 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1427 let (mut route, payment_hash, _, payment_secret) =
1428 get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
1429 route.paths[0].hops[0].fee_msat += 1;
1430 // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1431 let secp_ctx = Secp256k1::new();
1432 let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1434 let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1436 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1437 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
1438 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1439 3460001, &recipient_onion_fields, cur_height, &None).unwrap();
1440 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1441 let msg = msgs::UpdateAddHTLC {
1444 amount_msat: htlc_msat,
1445 payment_hash: payment_hash,
1446 cltv_expiry: htlc_cltv,
1447 onion_routing_packet: onion_packet,
1448 skimmed_fee_msat: None,
1449 blinding_point: None,
1452 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1454 // Now manually create the commitment_signed message corresponding to the update_add
1455 // nodes[0] just sent. In the code for construction of this message, "local" refers
1456 // to the sender of the message, and "remote" refers to the receiver.
1458 let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);
1460 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
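// Per BOLT 3, commitment numbers are 48 bits wide; LDK's channel state machine counts them
// down from 2^48 - 1, which is why the "initial" commitment number here is the largest
// representable value rather than zero and later commitments use INITIAL_COMMITMENT_NUMBER - n.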
1462 // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
1463 // needed to sign the new commitment tx and (2) sign the new commitment tx.
1464 let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
1465 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1466 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1467 let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
1468 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1469 ).flatten().unwrap();
1470 let chan_signer = local_chan.get_signer();
1471 // Make the signer believe we validated another commitment, so we can release the secret
1472 chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
1474 let pubkeys = chan_signer.as_ref().pubkeys();
1475 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1476 chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1477 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
1478 chan_signer.as_ref().pubkeys().funding_pubkey)
1480 let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
1481 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
1482 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
1483 let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
1484 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1485 ).flatten().unwrap();
1486 let chan_signer = remote_chan.get_signer();
1487 let pubkeys = chan_signer.as_ref().pubkeys();
1488 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1489 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
1490 chan_signer.as_ref().pubkeys().funding_pubkey)
1493 // Assemble the set of keys we can use for signatures for our commitment_signed message.
1494 let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1495 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
1497 // Build the remote commitment transaction so we can sign it, and then later use the
1498 // signature for the commitment_signed message.
1499 let local_chan_balance = 1313;
1501 let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1503 amount_msat: 3460001,
1504 cltv_expiry: htlc_cltv,
1506 transaction_output_index: Some(1),
1509 let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1512 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1513 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1514 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
1515 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1516 ).flatten().unwrap();
1517 let local_chan_signer = local_chan.get_signer();
1518 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1522 local_funding, remote_funding,
1523 commit_tx_keys.clone(),
1525 &mut vec![(accepted_htlc_info, ())],
1526 &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
1528 local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
1531 let commit_signed_msg = msgs::CommitmentSigned {
1534 htlc_signatures: res.1,
1536 partial_signature_with_nonce: None,
1539 // Send the commitment_signed message to nodes[1].
1540 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1541 let _ = nodes[1].node.get_and_clear_pending_msg_events();
1543 // Send the RAA to nodes[1].
1544 let raa_msg = msgs::RevokeAndACK {
1546 per_commitment_secret: local_secret,
1547 next_per_commitment_point: next_local_point,
1549 next_local_nonce: None,
1551 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1553 let events = nodes[1].node.get_and_clear_pending_msg_events();
1554 assert_eq!(events.len(), 1);
1555 // Make sure the HTLC failed in the way we expect.
1557 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1558 assert_eq!(update_fail_htlcs.len(), 1);
1559 update_fail_htlcs[0].clone()
1561 _ => panic!("Unexpected event"),
1563 nodes[1].logger.assert_log("lightning::ln::channel",
1564 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
1566 check_added_monitors!(nodes[1], 2);
1570 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1571 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1572 // Set the fee rate for the channel very high, to the point where the fundee
1573 // sending any above-dust amount would result in a channel reserve violation.
1574 // In this test we check that we would be prevented from sending an HTLC in this case.
1576 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1577 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1578 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1579 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1580 let default_config = UserConfig::default();
1581 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1583 let mut push_amt = 100_000_000;
1584 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1586 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
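// With this push_msat, nodes[0] (the funder, who pays the commitment transaction fee) keeps
// only its required reserve plus a fee allowance sized for MIN_AFFORDABLE_HTLC_COUNT pending
// HTLCs; once that many above-dust HTLCs are outstanding, adding another would raise the
// commitment fee past what nodes[0] can pay without dipping into its reserve.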
1588 let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1590 // Fetch a route in advance as we will be unable to once we're unable to send.
1591 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1592 // Sending exactly enough to hit the reserve amount should be accepted
1593 for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1594 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1597 // However, one more HTLC should put us significantly over the reserve amount and fail.
1598 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1599 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1600 ), true, APIError::ChannelUnavailable { .. }, {});
1601 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1605 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1606 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1607 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1608 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1609 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1610 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1611 let default_config = UserConfig::default();
1612 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1614 // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1615 // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1616 // transaction fee with 0 HTLCs (183 sats)).
1617 let mut push_amt = 100_000_000;
1618 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1619 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1620 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1622 // Send four HTLCs to cover the initial push_msat buffer we're required to include
1623 for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1624 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1627 let (mut route, payment_hash, _, payment_secret) =
1628 get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1629 route.paths[0].hops[0].fee_msat = 700_000;
1630 // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1631 let secp_ctx = Secp256k1::new();
1632 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1633 let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1634 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1635 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
1636 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1637 700_000, &recipient_onion_fields, cur_height, &None).unwrap();
1638 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1639 let msg = msgs::UpdateAddHTLC {
1641 htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
1642 amount_msat: htlc_msat,
1643 payment_hash: payment_hash,
1644 cltv_expiry: htlc_cltv,
1645 onion_routing_packet: onion_packet,
1646 skimmed_fee_msat: None,
1647 blinding_point: None,
1650 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1651 // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1652 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
1653 assert_eq!(nodes[0].node.list_channels().len(), 0);
1654 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1655 assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1656 check_added_monitors!(nodes[0], 1);
1657 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
1658 [nodes[1].node.get_our_node_id()], 100000);
1662 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1663 // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1664 // calculating our commitment transaction fee (this was previously broken).
1665 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1666 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1668 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1669 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
1670 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1671 let default_config = UserConfig::default();
1672 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1674 // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1675 // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1676 // transaction fee with 0 HTLCs (183 sats)).
1677 let mut push_amt = 100_000_000;
1678 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1679 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1680 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
1682 let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
1683 + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
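// dust_amt is the largest amount that is still trimmed to dust on nodes[0]'s commitment
// transaction: the channel dust limit plus the HTLC-success transaction fee at the current
// feerate, minus one msat, so the received HTLC below stays just under the threshold at which
// it would get its own commitment output.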
1684 // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1685 // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1686 // commitment transaction fee.
1687 route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1689 // Send four HTLCs to cover the initial push_msat buffer we're required to include
1690 for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1691 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1694 // One more than the dust amt should fail, however.
1695 let (mut route, our_payment_hash, _, our_payment_secret) =
1696 get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
1697 route.paths[0].hops[0].fee_msat += 1;
1698 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1699 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1700 ), true, APIError::ChannelUnavailable { .. }, {});
1704 fn test_chan_init_feerate_unaffordability() {
1705 // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
1706 // channel reserve and feerate requirements.
1707 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1708 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1709 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1710 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1711 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1712 let default_config = UserConfig::default();
1713 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1715 // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single HTLC.
1717 let mut push_amt = 100_000_000;
1718 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
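// At this push_msat nodes[0] retains exactly the commitment fee allowance for
// MIN_AFFORDABLE_HTLC_COUNT HTLCs, so pushing even one more msat (as attempted below) leaves
// it unable to pay the initial commitment transaction fee and create_channel rejects the request.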
1719 assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
1720 APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
1722 // During open, we don't have a "counterparty channel reserve" to check against, so that
1723 // requirement only comes into play on the open_channel handling side.
1724 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1725 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
1726 let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
1727 open_channel_msg.push_msat += 1;
1728 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
1730 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1731 assert_eq!(msg_events.len(), 1);
1732 match msg_events[0] {
1733 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
1734 assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
1736 _ => panic!("Unexpected event"),
1741 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1742 // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1743 // calculating our counterparty's commitment transaction fee (this was previously broken).
1744 let chanmon_cfgs = create_chanmon_cfgs(2);
1745 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1746 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
1747 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1748 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
1750 let payment_amt = 46000; // Dust amount
1751 // In the previous code, these first four payments would succeed.
1752 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1753 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1754 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1755 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1757 // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1758 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1759 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1760 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1761 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1762 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1764 // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1765 // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1766 // transaction fee and therefore perceived this next payment as a channel reserve violation.
1767 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1771 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1772 let chanmon_cfgs = create_chanmon_cfgs(3);
1773 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1774 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1775 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1776 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1777 let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
1780 let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1781 let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1782 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
1783 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
1785 // Add a 2* and +1 for the fee spike reserve.
1786 let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1787 let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1788 let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
1790 // Add a pending HTLC.
1791 let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1792 let payment_event_1 = {
1793 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1794 RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1795 check_added_monitors!(nodes[0], 1);
1797 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1798 assert_eq!(events.len(), 1);
1799 SendEvent::from_event(events.remove(0))
1801 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1803 // Attempt to trigger a channel reserve violation --> payment failure.
1804 let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
1805 let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1806 let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
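// recv_value_2 is exactly 1 msat more than nodes[0] can afford to add on top of the HTLC
// already in flight once the reserve nodes[1] requires of it and the two-HTLC commitment fee
// are accounted for, so nodes[1] should reject the hand-rolled update_add below as a reserve
// violation.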
1807 let mut route_2 = route_1.clone();
1808 route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;
1810 // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1811 let secp_ctx = Secp256k1::new();
1812 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1813 let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
1814 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1815 let recipient_onion_fields = RecipientOnionFields::spontaneous_empty();
1816 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
1817 &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None).unwrap();
1818 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
1819 let msg = msgs::UpdateAddHTLC {
1822 amount_msat: htlc_msat + 1,
1823 payment_hash: our_payment_hash_1,
1824 cltv_expiry: htlc_cltv,
1825 onion_routing_packet: onion_packet,
1826 skimmed_fee_msat: None,
1827 blinding_point: None,
1830 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1831 // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1832 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
1833 assert_eq!(nodes[1].node.list_channels().len(), 1);
1834 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1835 assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1836 check_added_monitors!(nodes[1], 1);
1837 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
1838 [nodes[0].node.get_our_node_id()], 100000);
1842 fn test_inbound_outbound_capacity_is_not_zero() {
1843 let chanmon_cfgs = create_chanmon_cfgs(2);
1844 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1845 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1846 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1847 let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1848 let channels0 = node_chanmgrs[0].list_channels();
1849 let channels1 = node_chanmgrs[1].list_channels();
1850 let default_config = UserConfig::default();
1851 assert_eq!(channels0.len(), 1);
1852 assert_eq!(channels1.len(), 1);
1854 let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
1855 assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1856 assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1858 assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1859 assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1862 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
1863 (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
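// Equivalently: fee_msat = floor((base_weight + num_htlcs * per_htlc_weight) * feerate_per_kw
// / 1000) * 1000, i.e. the commitment weight times the sat-per-kiloweight feerate, rounded
// down to whole sats and then expressed in msat.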
1867 fn test_channel_reserve_holding_cell_htlcs() {
1868 let chanmon_cfgs = create_chanmon_cfgs(3);
1869 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1870 // When this test was written, the default base fee floated based on the HTLC count.
1871 // It is now fixed, so we simply set the fee to the expected value here.
1872 let mut config = test_default_channel_config();
1873 config.channel_config.forwarding_fee_base_msat = 239;
1874 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1875 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1876 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1877 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1879 let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1880 let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1882 let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1883 let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1885 macro_rules! expect_forward {
1887 let mut events = $node.node.get_and_clear_pending_msg_events();
1888 assert_eq!(events.len(), 1);
1889 check_added_monitors!($node, 1);
1890 let payment_event = SendEvent::from_event(events.remove(0));
1895 let feemsat = 239; // set above
1896 let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1897 let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1898 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
1900 let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1902 // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1904 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1905 .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1906 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
1907 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1908 assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1910 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1911 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1912 ), true, APIError::ChannelUnavailable { .. }, {});
1913 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1916 // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
1917 // nodes[0]'s wealth
1919 let amt_msat = recv_value_0 + total_fee_msat;
1920 // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
1921 // Also, make sure each payment is over the dust limit so that
1922 // it'll be included in each commit tx fee calculation.
1923 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1924 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1925 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1929 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1930 .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1931 let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
1932 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1933 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1935 let (stat01_, stat11_, stat12_, stat22_) = (
1936 get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1937 get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1938 get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1939 get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1942 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1943 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1944 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1945 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1946 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1949 // adding pending output.
1950 // 2* and +1 HTLCs on the commit tx fee for the fee spike reserve.
1951 // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
1952 // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
1953 // divide this quantity into 3 portions that will each be sent in an HTLC. This allows us
1954 // to test channel reserve policy at the edges of what amount is sendable, i.e.
1955 // cases where 1 msat over X amount will cause a payment failure, but anything less than
1956 // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
1957 // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
1958 // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee policy.
1960 let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1961 let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
1962 let amt_msat_1 = recv_value_1 + total_fee_msat;
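// recv_value_1 is roughly half of what nodes[0] can spend once the reserve, the routing fee
// and the (fee-spike-buffered) two-HTLC commitment fee are set aside; the remainder is split
// into recv_value_21 and recv_value_22 further down to exercise the holding cell as described
// in the comment above.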
1964 let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1965 let payment_event_1 = {
1966 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1967 RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1968 check_added_monitors!(nodes[0], 1);
1970 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1971 assert_eq!(events.len(), 1);
1972 SendEvent::from_event(events.remove(0))
1974 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1976 // channel reserve test with htlc pending output > 0
1977 let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
1979 let mut route = route_1.clone();
1980 route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
1981 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
1982 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1983 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1984 ), true, APIError::ChannelUnavailable { .. }, {});
1985 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1988 // split the rest to test holding cell
1989 let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1990 let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1991 let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1992 let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
1994 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1995 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1998 // now see if they go through on both sides
1999 let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
2000 // but this one will get stuck in the holding cell
2001 nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
2002 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
2003 check_added_monitors!(nodes[0], 0);
2004 let events = nodes[0].node.get_and_clear_pending_events();
2005 assert_eq!(events.len(), 0);
2007 // test with outbound holding cell amount > 0
2009 let (mut route, our_payment_hash, _, our_payment_secret) =
2010 get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
2011 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
2012 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
2013 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
2014 ), true, APIError::ChannelUnavailable { .. }, {});
2015 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2018 let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
2019 // this will also get stuck in the holding cell
2020 nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
2021 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
2022 check_added_monitors!(nodes[0], 0);
2023 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
2024 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2026 // flush the pending htlc
2027 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
2028 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2029 check_added_monitors!(nodes[1], 1);
2031 // the pending htlc should be promoted to committed
2032 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
2033 check_added_monitors!(nodes[0], 1);
2034 let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2036 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
2037 let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2038 // No commitment_signed so get_event_msg's assert(len == 1) passes
2039 check_added_monitors!(nodes[0], 1);
2041 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
2042 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2043 check_added_monitors!(nodes[1], 1);
2045 expect_pending_htlcs_forwardable!(nodes[1]);
2047 let ref payment_event_11 = expect_forward!(nodes[1]);
2048 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
2049 commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
2051 expect_pending_htlcs_forwardable!(nodes[2]);
2052 expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
2054 // flush the htlcs in the holding cell
2055 assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
2056 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
2057 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
2058 commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
2059 expect_pending_htlcs_forwardable!(nodes[1]);
2061 let ref payment_event_3 = expect_forward!(nodes[1]);
2062 assert_eq!(payment_event_3.msgs.len(), 2);
2063 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2064 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2066 commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2067 expect_pending_htlcs_forwardable!(nodes[2]);
2069 let events = nodes[2].node.get_and_clear_pending_events();
2070 assert_eq!(events.len(), 2);
2072 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2073 assert_eq!(our_payment_hash_21, *payment_hash);
2074 assert_eq!(recv_value_21, amount_msat);
2075 assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2076 assert_eq!(via_channel_id, Some(chan_2.2));
2078 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2079 assert!(payment_preimage.is_none());
2080 assert_eq!(our_payment_secret_21, *payment_secret);
2082 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2085 _ => panic!("Unexpected event"),
2088 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2089 assert_eq!(our_payment_hash_22, *payment_hash);
2090 assert_eq!(recv_value_22, amount_msat);
2091 assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2092 assert_eq!(via_channel_id, Some(chan_2.2));
2094 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2095 assert!(payment_preimage.is_none());
2096 assert_eq!(our_payment_secret_22, *payment_secret);
2098 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2101 _ => panic!("Unexpected event"),
2104 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2105 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2106 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2108 let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
2109 let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
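// recv_value_3 is the difference between the (fee-spike-buffered) two-HTLC and zero-HTLC
// commitment fees, less the routing fee; after this final send nodes[0] should be left with
// exactly its reserve plus one in-flight HTLC's worth of commitment fee, as asserted below.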
2110 send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2112 let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
2113 let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2114 let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2115 assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2116 assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2118 let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2119 assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2123 fn channel_reserve_in_flight_removes() {
2124 // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2125 // can send to its counterparty, but due to update ordering, the other side may not yet have
2126 // considered those HTLCs fully removed.
2127 // This tests that we don't count HTLCs which will not be included in the next remote
2128 // commitment transaction towards the reserve value (as it implies no commitment transaction
2129 // will be generated which violates the remote reserve value).
2130 // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2132 // * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2133 // you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2134 // you only consider the value of the first HTLC, it may),
2135 // * start routing a third HTLC from A to B,
2136 // * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2137 // the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2138 // * deliver the first fulfill from B
2139 // * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell claim,
2141 // * deliver A's response CS and RAA.
2142 // This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2143 // removed it fully. B now has the push_msat plus the first two HTLCs in value.
2144 // * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2145 // of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2146 let chanmon_cfgs = create_chanmon_cfgs(2);
2147 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2148 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2149 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2150 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2152 let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2153 // Route the first two HTLCs.
2154 let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
2155 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2156 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
2158 // Start routing the third HTLC (this is just used to get everyone in the right state).
2159 let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2161 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2162 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2163 check_added_monitors!(nodes[0], 1);
2164 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2165 assert_eq!(events.len(), 1);
2166 SendEvent::from_event(events.remove(0))
2169 // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2170 // initial fulfill/CS.
2171 nodes[1].node.claim_funds(payment_preimage_1);
2172 expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2173 check_added_monitors!(nodes[1], 1);
2174 let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2176 // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2177 // remove the second HTLC when we send the HTLC back from B to A.
2178 nodes[1].node.claim_funds(payment_preimage_2);
2179 expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2180 check_added_monitors!(nodes[1], 1);
2181 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2183 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2184 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2185 check_added_monitors!(nodes[0], 1);
2186 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2187 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2189 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2190 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2191 check_added_monitors!(nodes[1], 1);
2192 // B is already AwaitingRAA, so can't generate a CS here
2193 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2195 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2196 check_added_monitors!(nodes[1], 1);
2197 let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2199 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2200 check_added_monitors!(nodes[0], 1);
2201 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2203 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2204 check_added_monitors!(nodes[1], 1);
2205 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2207 // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2208 // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2209 // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2210 // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2211 // on-chain as necessary).
2212 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2213 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2214 check_added_monitors!(nodes[0], 1);
2215 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2216 expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
2218 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2219 check_added_monitors!(nodes[1], 1);
2220 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
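// At this point B considers both claimed HTLCs fully removed (and their value spendable again),
// while A still has the second one pending in AwaitingRemovedRemoteRevoke.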
2222 expect_pending_htlcs_forwardable!(nodes[1]);
2223 expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2225 // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2226 // resolve the second HTLC from A's point of view.
2227 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2228 check_added_monitors!(nodes[0], 1);
2229 expect_payment_path_successful!(nodes[0]);
2230 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2232 // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2233 // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2234 let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
let send_2 = {
2236 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2237 RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2238 check_added_monitors!(nodes[1], 1);
2239 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2240 assert_eq!(events.len(), 1);
2241 SendEvent::from_event(events.remove(0))
};
2244 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2245 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2246 check_added_monitors!(nodes[0], 1);
2247 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2249 // Now just resolve all the outstanding messages/HTLCs for completeness...
2251 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2252 check_added_monitors!(nodes[1], 1);
2253 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2255 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2256 check_added_monitors!(nodes[1], 1);
2258 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2259 check_added_monitors!(nodes[0], 1);
2260 expect_payment_path_successful!(nodes[0]);
2261 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2263 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2264 check_added_monitors!(nodes[1], 1);
2265 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2267 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2268 check_added_monitors!(nodes[0], 1);
2270 expect_pending_htlcs_forwardable!(nodes[0]);
2271 expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2273 claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2274 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
}
#[test]
2278 fn channel_monitor_network_test() {
2279 // Simple test which builds a network of ChannelManagers, connects them to each other, and
2280 // tests that ChannelMonitor is able to recover from various states.
2281 let chanmon_cfgs = create_chanmon_cfgs(5);
2282 let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2283 let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2284 let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2286 // Create some initial channels
2287 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2288 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2289 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2290 let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2292 // Make sure all nodes are at the same starting height
2293 connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2294 connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2295 connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2296 connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2297 connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2299 // Rebalance the network a bit by relaying one payment through all the channels...
2300 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2301 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2302 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2303 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2305 // Simple case with no pending HTLCs:
2306 nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2307 check_added_monitors!(nodes[1], 1);
2308 check_closed_broadcast!(nodes[1], true);
2309 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
2311 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2312 assert_eq!(node_txn.len(), 1);
2313 mine_transaction(&nodes[1], &node_txn[0]);
2314 if nodes[1].connect_style.borrow().updates_best_block_first() {
2315 let _ = nodes[1].tx_broadcaster.txn_broadcast();
}
2318 mine_transaction(&nodes[0], &node_txn[0]);
2319 check_added_monitors!(nodes[0], 1);
2320 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2322 check_closed_broadcast!(nodes[0], true);
2323 assert_eq!(nodes[0].node.list_channels().len(), 0);
2324 assert_eq!(nodes[1].node.list_channels().len(), 1);
2325 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2327 // One pending HTLC is discarded by the force-close:
2328 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2330 // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2331 // broadcast until we reach the timelock time).
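// The block count below mirrors the HTLC's CLTV expiry (TEST_FINAL_CLTV plus one hop's
// MIN_CLTV_EXPIRY_DELTA) plus LATENCY_GRACE_PERIOD_BLOCKS of grace before the HTLC-Timeout
// claim is broadcast.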
2332 nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2333 check_closed_broadcast!(nodes[1], true);
2334 check_added_monitors!(nodes[1], 1);
2336 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2337 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2338 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2339 mine_transaction(&nodes[2], &node_txn[0]);
2340 check_added_monitors!(nodes[2], 1);
2341 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2343 check_closed_broadcast!(nodes[2], true);
2344 assert_eq!(nodes[1].node.list_channels().len(), 0);
2345 assert_eq!(nodes[2].node.list_channels().len(), 1);
2346 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
2347 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
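// Helper: has $node claim $preimage and checks that the resulting commitment update (a lone
// update_fulfill, no adds or fails) is addressed to $prev_node, the hop the HTLC came from.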
2349 macro_rules! claim_funds {
2350 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2352 $node.node.claim_funds($preimage);
2353 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2354 check_added_monitors!($node, 1);
2356 let events = $node.node.get_and_clear_pending_msg_events();
2357 assert_eq!(events.len(), 1);
2359 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2360 assert!(update_add_htlcs.is_empty());
2361 assert!(update_fail_htlcs.is_empty());
2362 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2364 _ => panic!("Unexpected event"),
2370 // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2371 // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2372 nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2373 check_added_monitors!(nodes[2], 1);
2374 check_closed_broadcast!(nodes[2], true);
2375 let node2_commitment_txid;
2377 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2378 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2379 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2380 node2_commitment_txid = node_txn[0].txid();
2382 // Claim the payment on nodes[3], giving it knowledge of the preimage
2383 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2384 mine_transaction(&nodes[3], &node_txn[0]);
2385 check_added_monitors!(nodes[3], 1);
2386 check_preimage_claim(&nodes[3], &node_txn);
2388 check_closed_broadcast!(nodes[3], true);
2389 assert_eq!(nodes[2].node.list_channels().len(), 0);
2390 assert_eq!(nodes[3].node.list_channels().len(), 1);
2391 check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
2392 check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2394 // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2395 // confusing us in the following tests.
2396 let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2398 // One pending HTLC to time out:
2399 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2400 // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2403 let (close_chan_update_1, close_chan_update_2) = {
2404 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2405 let events = nodes[3].node.get_and_clear_pending_msg_events();
2406 assert_eq!(events.len(), 2);
2407 let close_chan_update_1 = match events[1] {
2408 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2411 _ => panic!("Unexpected event"),
2414 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2415 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2417 _ => panic!("Unexpected event"),
2419 check_added_monitors!(nodes[3], 1);
2421 // Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer.
{
2423 let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2424 node_txn.retain(|tx| {
2425 if tx.input[0].previous_output.txid == node2_commitment_txid {
false
} else { true }
});
}
2431 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2433 // Claim the payment on nodes[4], giving it knowledge of the preimage
2434 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2436 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2437 let events = nodes[4].node.get_and_clear_pending_msg_events();
2438 assert_eq!(events.len(), 2);
2439 let close_chan_update_2 = match events[1] {
2440 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2443 _ => panic!("Unexpected event"),
2446 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2447 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2449 _ => panic!("Unexpected event"),
2451 check_added_monitors!(nodes[4], 1);
2452 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2453 check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
2455 mine_transaction(&nodes[4], &node_txn[0]);
2456 check_preimage_claim(&nodes[4], &node_txn);
2457 (close_chan_update_1, close_chan_update_2)
2459 nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2460 nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2461 assert_eq!(nodes[3].node.list_channels().len(), 0);
2462 assert_eq!(nodes[4].node.list_channels().len(), 0);
2464 assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2465 Ok(ChannelMonitorUpdateStatus::Completed));
2466 check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
}
#[test]
2470 fn test_justice_tx_htlc_timeout() {
2471 // Test justice txn built on revoked HTLC-Timeout tx, against both sides
2472 let mut alice_config = test_default_channel_config();
2473 alice_config.channel_handshake_config.announced_channel = true;
2474 alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2475 alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2476 let mut bob_config = test_default_channel_config();
2477 bob_config.channel_handshake_config.announced_channel = true;
2478 bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2479 bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
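// The asymmetric our_to_self_delay values (roughly five days of blocks for Alice, three for
// Bob) just give both sides non-trivial contest windows; the exact numbers are presumably not
// otherwise significant.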
2480 let user_cfgs = [Some(alice_config), Some(bob_config)];
2481 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2482 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2483 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2484 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2485 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2486 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2487 // Create some new channels:
2488 let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2490 // A pending HTLC which will be revoked:
2491 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2492 // Get the will-be-revoked local txn from nodes[0]
2493 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2494 assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2495 assert_eq!(revoked_local_txn[0].input.len(), 1);
2496 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2497 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2498 assert_eq!(revoked_local_txn[1].input.len(), 1);
2499 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2500 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2501 // Revoke the old state
2502 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2505 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2507 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2508 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2509 assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2510 check_spends!(node_txn[0], revoked_local_txn[0]);
2511 node_txn.swap_remove(0);
2513 check_added_monitors!(nodes[1], 1);
2514 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2515 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2517 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2518 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2519 // Verify broadcast of revoked HTLC-timeout
2520 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2521 check_added_monitors!(nodes[0], 1);
2522 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2523 // Broadcast revoked HTLC-timeout on node 1
2524 mine_transaction(&nodes[1], &node_txn[1]);
2525 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2527 get_announce_close_broadcast_events(&nodes, 0, 1);
2528 assert_eq!(nodes[0].node.list_channels().len(), 0);
2529 assert_eq!(nodes[1].node.list_channels().len(), 0);
}
#[test]
2533 fn test_justice_tx_htlc_success() {
2534 // Test justice txn built on revoked HTLC-Success tx, against both sides
2535 let mut alice_config = test_default_channel_config();
2536 alice_config.channel_handshake_config.announced_channel = true;
2537 alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2538 alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2539 let mut bob_config = test_default_channel_config();
2540 bob_config.channel_handshake_config.announced_channel = true;
2541 bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2542 bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2543 let user_cfgs = [Some(alice_config), Some(bob_config)];
2544 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2545 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2546 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2547 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2548 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2549 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2550 // Create some new channels:
2551 let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2553 // A pending HTLC which will be revoked:
2554 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2555 // Get the will-be-revoked local txn from B
2556 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2557 assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2558 assert_eq!(revoked_local_txn[0].input.len(), 1);
2559 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2560 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2561 // Revoke the old state
2562 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2564 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2566 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2567 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2568 assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2570 check_spends!(node_txn[0], revoked_local_txn[0]);
2571 node_txn.swap_remove(0);
2573 check_added_monitors!(nodes[0], 1);
2574 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2576 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2577 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2578 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2579 check_added_monitors!(nodes[1], 1);
2580 mine_transaction(&nodes[0], &node_txn[1]);
2581 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2582 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2584 get_announce_close_broadcast_events(&nodes, 0, 1);
2585 assert_eq!(nodes[0].node.list_channels().len(), 0);
2586 assert_eq!(nodes[1].node.list_channels().len(), 0);
}
#[test]
2590 fn revoked_output_claim() {
2591 // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2592 // transaction is broadcast by its counterparty
2593 let chanmon_cfgs = create_chanmon_cfgs(2);
2594 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2595 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2596 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2597 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2598 // node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output
2599 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2600 assert_eq!(revoked_local_txn.len(), 1);
2601 // Only output is the full channel value back to nodes[0]:
2602 assert_eq!(revoked_local_txn[0].output.len(), 1);
2603 // Send a payment through, updating everyone's latest commitment txn
2604 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2606 // Inform nodes[1] that nodes[0] broadcast a stale tx
2607 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2608 check_added_monitors!(nodes[1], 1);
2609 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2610 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2611 assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2613 check_spends!(node_txn[0], revoked_local_txn[0]);
2615 // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2616 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2617 get_announce_close_broadcast_events(&nodes, 0, 1);
2618 check_added_monitors!(nodes[0], 1);
2619 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
2623 fn test_forming_justice_tx_from_monitor_updates() {
2624 do_test_forming_justice_tx_from_monitor_updates(true);
2625 do_test_forming_justice_tx_from_monitor_updates(false);
}
2628 fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
2629 // Simple test to make sure that the justice tx built by the WatchtowerPersister
2630 // is well-formed and can be broadcast/confirmed successfully in the event
2631 // that a revoked commitment transaction is broadcast
2632 // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually)
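// The WatchtowerPersister test persister builds and stores a justice transaction for each
// counterparty commitment it is asked to persist; we fetch it below by funding outpoint and
// revoked-commitment txid via justice_tx().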
2633 let chanmon_cfgs = create_chanmon_cfgs(2);
2634 let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap();
2635 let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap();
2636 let persisters = vec![WatchtowerPersister::new(destination_script0),
2637 WatchtowerPersister::new(destination_script1)];
2638 let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
2639 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2640 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2641 let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2642 let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
2644 if !broadcast_initial_commitment {
2645 // Send a payment to move the channel forward
2646 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2649 // node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output.
2650 // We'll keep this commitment transaction to broadcast once it's revoked.
2651 let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
2652 assert_eq!(revoked_local_txn.len(), 1);
2653 let revoked_commitment_tx = &revoked_local_txn[0];
2655 // Send another payment, now revoking the previous commitment tx
2656 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2658 let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2659 check_spends!(justice_tx, revoked_commitment_tx);
2661 mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2662 mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2664 check_added_monitors!(nodes[1], 1);
2665 check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2666 &[nodes[0].node.get_our_node_id()], 100_000);
2667 get_announce_close_broadcast_events(&nodes, 1, 0);
2669 check_added_monitors!(nodes[0], 1);
2670 check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2671 &[nodes[1].node.get_our_node_id()], 100_000);
2673 // Check that the justice tx has sent the revoked output value to nodes[1]
2674 let monitor = get_monitor!(nodes[1], channel_id);
2675 let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
match balance {
2677 channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2678 _ => panic!("Unexpected balance type"),
}
});
2681 // On the first commitment, node[1]'s balance was below dust so it didn't have an output
2682 let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value.to_sat() };
2683 let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value.to_sat();
2684 assert_eq!(total_claimable_balance, expected_claimable_balance);
}
#[test]
2689 fn claim_htlc_outputs_shared_tx() {
2690 // Node revoked old state, HTLCs haven't timed out yet, claim them in a shared justice tx
2691 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2692 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2693 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2694 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2695 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2697 // Create some new channel:
2698 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2700 // Rebalance the network so HTLCs can be routed in both directions
2701 send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2702 // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2703 let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2704 let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2706 // Get the will-be-revoked local txn from node[0]
2707 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2708 assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2709 assert_eq!(revoked_local_txn[0].input.len(), 1);
2710 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2711 assert_eq!(revoked_local_txn[1].input.len(), 1);
2712 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2713 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2714 check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2716 // Revoke the old state
2717 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2720 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2721 check_added_monitors!(nodes[0], 1);
2722 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2723 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2724 check_added_monitors!(nodes[1], 1);
2725 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2726 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2727 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2729 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2730 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2732 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2733 check_spends!(node_txn[0], revoked_local_txn[0]);
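// The three inputs spend the revoked to_local output plus the two revoked HTLC outputs; we tell
// them apart below purely by the length of the witness script each one spends.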
2735 let mut witness_lens = BTreeSet::new();
2736 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2737 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2738 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2739 assert_eq!(witness_lens.len(), 3);
2740 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2741 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2742 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2744 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2745 // ANTI_REORG_DELAY confirmations.
2746 mine_transaction(&nodes[1], &node_txn[0]);
2747 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2748 expect_payment_failed!(nodes[1], payment_hash_2, false);
2750 get_announce_close_broadcast_events(&nodes, 0, 1);
2751 assert_eq!(nodes[0].node.list_channels().len(), 0);
2752 assert_eq!(nodes[1].node.list_channels().len(), 0);
}
#[test]
2756 fn claim_htlc_outputs_single_tx() {
2757 // Node revoked old state, HTLCs have timed out, claim each of them in separate justice txs
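// Unlike claim_htlc_outputs_shared_tx above, the HTLCs here are allowed to reach their expiry
// height first, so the revoked to_local output and the two revoked HTLC outputs end up claimed
// by separate justice transactions rather than one shared transaction.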
2758 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2759 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2760 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2761 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2762 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2764 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2766 // Rebalance the network so HTLCs can be routed in both directions
2767 send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2768 // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2769 // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
2770 let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2771 let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2773 // Get the will-be-revoked local txn from node[0]
2774 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2776 // Revoke the old state
2777 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2780 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2781 check_added_monitors!(nodes[0], 1);
2782 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2783 check_added_monitors!(nodes[1], 1);
2784 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2785 let mut events = nodes[0].node.get_and_clear_pending_events();
2786 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
2787 match events.last().unwrap() {
2788 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2789 _ => panic!("Unexpected event"),
2792 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2793 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2795 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2797 // Check the pair of local commitment and HTLC-timeout transactions broadcast due to HTLC expiration
2798 assert_eq!(node_txn[0].input.len(), 1);
2799 check_spends!(node_txn[0], chan_1.3);
2800 assert_eq!(node_txn[1].input.len(), 1);
2801 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2802 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output
2803 check_spends!(node_txn[1], node_txn[0]);
2805 // Filter out any non justice transactions.
2806 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2807 assert!(node_txn.len() > 3);
2809 assert_eq!(node_txn[0].input.len(), 1);
2810 assert_eq!(node_txn[1].input.len(), 1);
2811 assert_eq!(node_txn[2].input.len(), 1);
2813 check_spends!(node_txn[0], revoked_local_txn[0]);
2814 check_spends!(node_txn[1], revoked_local_txn[0]);
2815 check_spends!(node_txn[2], revoked_local_txn[0]);
2817 let mut witness_lens = BTreeSet::new();
2818 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2819 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2820 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2821 assert_eq!(witness_lens.len(), 3);
2822 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2823 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2824 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2826 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2827 // ANTI_REORG_DELAY confirmations.
2828 mine_transaction(&nodes[1], &node_txn[0]);
2829 mine_transaction(&nodes[1], &node_txn[1]);
2830 mine_transaction(&nodes[1], &node_txn[2]);
2831 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2832 expect_payment_failed!(nodes[1], payment_hash_2, false);
2834 get_announce_close_broadcast_events(&nodes, 0, 1);
2835 assert_eq!(nodes[0].node.list_channels().len(), 0);
2836 assert_eq!(nodes[1].node.list_channels().len(), 0);
}
#[test]
2840 fn test_htlc_on_chain_success() {
2841 // Test that in case of a unilateral close onchain, we detect the state of the output and pass
2842 // the preimage backward accordingly. So here we test that ChannelManager is
2843 // broadcasting the right event to the other nodes in the payment path.
2844 // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2845 // A --------------------> B ----------------------> C (preimage)
2846 // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2847 // commitment transaction was broadcast.
2848 // Then, B should learn the preimage from said transactions, attempting to claim backwards
2850 // B should be able to claim via preimage if A then broadcasts its local tx.
2851 // Finally, when A sees B's latest local commitment transaction it should be able to claim
2852 // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2853 // PaymentSent event).
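// Rough shape of what follows, for each of the two HTLCs:
//   1) C broadcasts its commitment tx and claims the HTLC outputs with HTLC-Success txs,
//   2) B extracts the preimages from those claims, passing them back to A via update_fulfill
//      (and broadcasting its own timeout claims against C's commitment tx),
//   3) once A's commitment tx confirms on B's chain, B claims its HTLC outputs with the
//      preimage, and A in turn extracts the preimage from that claim and generates PaymentSent.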
2855 let chanmon_cfgs = create_chanmon_cfgs(3);
2856 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2857 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2858 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2860 // Create some initial channels
2861 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2862 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2864 // Ensure all nodes are at the same height
2865 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2866 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2867 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2868 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2870 // Rebalance the network a bit by relaying one payment through all the channels...
2871 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2872 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2874 let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2875 let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2877 // Broadcast legit commitment tx from C on B's chain
2878 // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2879 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2880 assert_eq!(commitment_tx.len(), 1);
2881 check_spends!(commitment_tx[0], chan_2.3);
2882 nodes[2].node.claim_funds(our_payment_preimage);
2883 expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2884 nodes[2].node.claim_funds(our_payment_preimage_2);
2885 expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2886 check_added_monitors!(nodes[2], 2);
2887 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2888 assert!(updates.update_add_htlcs.is_empty());
2889 assert!(updates.update_fail_htlcs.is_empty());
2890 assert!(updates.update_fail_malformed_htlcs.is_empty());
2891 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2893 mine_transaction(&nodes[2], &commitment_tx[0]);
2894 check_closed_broadcast!(nodes[2], true);
2895 check_added_monitors!(nodes[2], 1);
2896 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2897 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2898 assert_eq!(node_txn.len(), 2);
2899 check_spends!(node_txn[0], commitment_tx[0]);
2900 check_spends!(node_txn[1], commitment_tx[0]);
2901 assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2902 assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2903 assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
2904 assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output
2905 assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
2906 assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
2908 // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2909 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2910 connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2912 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2913 assert_eq!(added_monitors.len(), 1);
2914 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2915 added_monitors.clear();
2917 let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2918 assert_eq!(forwarded_events.len(), 3);
2919 match forwarded_events[0] {
2920 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2921 _ => panic!("Unexpected event"),
2923 let chan_id = Some(chan_1.2);
2924 match forwarded_events[1] {
2925 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2926 next_channel_id, outbound_amount_forwarded_msat, ..
2928 assert_eq!(total_fee_earned_msat, Some(1000));
2929 assert_eq!(prev_channel_id, chan_id);
2930 assert_eq!(claim_from_onchain_tx, true);
2931 assert_eq!(next_channel_id, Some(chan_2.2));
2932 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2936 match forwarded_events[2] {
2937 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2938 next_channel_id, outbound_amount_forwarded_msat, ..
2940 assert_eq!(total_fee_earned_msat, Some(1000));
2941 assert_eq!(prev_channel_id, chan_id);
2942 assert_eq!(claim_from_onchain_tx, true);
2943 assert_eq!(next_channel_id, Some(chan_2.2));
2944 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2948 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2950 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2951 assert_eq!(added_monitors.len(), 2);
2952 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2953 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2954 added_monitors.clear();
2956 assert_eq!(events.len(), 3);
2958 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2959 let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2961 match nodes_2_event {
2962 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
2963 _ => panic!("Unexpected event"),
2966 match nodes_0_event {
2967 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2968 assert!(update_add_htlcs.is_empty());
2969 assert!(update_fail_htlcs.is_empty());
2970 assert_eq!(update_fulfill_htlcs.len(), 1);
2971 assert!(update_fail_malformed_htlcs.is_empty());
2972 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2974 _ => panic!("Unexpected event"),
2977 // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2979 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2980 _ => panic!("Unexpected event"),
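// Helper: checks the two HTLC-timeout claims $node has broadcast against $commitment_tx. With
// $htlc_offered set, the claims spend offered-HTLC outputs on the node's own commitment tx and
// pay to revokeable p2wsh outputs; otherwise they spend accepted-HTLC outputs on the remote
// commitment tx and pay directly to p2wpkh outputs.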
2983 macro_rules! check_tx_local_broadcast {
2984 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2985 let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2986 assert_eq!(node_txn.len(), 2);
2987 // Node[1]: 2 * HTLC-timeout tx
2988 // Node[0]: 2 * HTLC-timeout tx
2989 check_spends!(node_txn[0], $commitment_tx);
2990 check_spends!(node_txn[1], $commitment_tx);
2991 assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
2992 assert_ne!(node_txn[1].lock_time, LockTime::ZERO);
if $htlc_offered {
2994 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2995 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2996 assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
2997 assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output
} else {
2999 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3000 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3001 assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment
3002 assert!(node_txn[1].output[0].script_pubkey.is_p2wpkh()); // direct payment
}
3007 // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
3008 check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
3010 // Broadcast legit commitment tx from A on B's chain
3011 // Broadcast preimage tx by B on offered output from A commitment tx on A's chain
3012 let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
3013 check_spends!(node_a_commitment_tx[0], chan_1.3);
3014 mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
3015 check_closed_broadcast!(nodes[1], true);
3016 check_added_monitors!(nodes[1], 1);
3017 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3018 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3019 assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
3020 let commitment_spend =
3021 if node_txn.len() == 1 {
3024 // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast.
3025 // FullBlockViaListen
3026 if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
3027 check_spends!(node_txn[1], commitment_tx[0]);
3028 check_spends!(node_txn[2], commitment_tx[0]);
3029 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
3032 check_spends!(node_txn[0], commitment_tx[0]);
3033 check_spends!(node_txn[1], commitment_tx[0]);
3034 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
3039 check_spends!(commitment_spend, node_a_commitment_tx[0]);
3040 assert_eq!(commitment_spend.input.len(), 2);
3041 assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3042 assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3043 assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
3044 assert!(commitment_spend.output[0].script_pubkey.is_p2wpkh()); // direct payment
3045 // We don't bother to check that B can claim the HTLC output on its commitment tx here as
3046 // we already checked the same situation with A.
3048 // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
3049 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
3050 connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3051 check_closed_broadcast!(nodes[0], true);
3052 check_added_monitors!(nodes[0], 1);
3053 let events = nodes[0].node.get_and_clear_pending_events();
3054 assert_eq!(events.len(), 5);
3055 let mut first_claimed = false;
3056 for event in events {
3058 Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3059 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
3060 assert!(!first_claimed);
3061 first_claimed = true;
3063 assert_eq!(payment_preimage, our_payment_preimage_2);
3064 assert_eq!(payment_hash, payment_hash_2);
3067 Event::PaymentPathSuccessful { .. } => {},
3068 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3069 _ => panic!("Unexpected event"),
3072 check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
}
3075 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3076 // Test that in case of a unilateral close onchain, we detect the state of the output and
3077 // time out the HTLC backward accordingly. So here we test that ChannelManager is
3078 // broadcasting the right event to the other nodes in the payment path.
3079 // A ------------------> B ----------------------> C (timeout)
3080 //    B's commitment tx                 C's commitment tx
3082 //    B's HTLC timeout tx               B's timeout tx
3084 let chanmon_cfgs = create_chanmon_cfgs(3);
3085 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3086 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3087 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3088 *nodes[0].connect_style.borrow_mut() = connect_style;
3089 *nodes[1].connect_style.borrow_mut() = connect_style;
3090 *nodes[2].connect_style.borrow_mut() = connect_style;
3092 // Create some initial channels
3093 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3094 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3096 // Rebalance the network a bit by relaying one payment through all the channels...
3097 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3098 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3100 let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
3102 // Broadcast legit commitment tx from C on B's chain
3103 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3104 check_spends!(commitment_tx[0], chan_2.3);
3105 nodes[2].node.fail_htlc_backwards(&payment_hash);
3106 check_added_monitors!(nodes[2], 0);
3107 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3108 check_added_monitors!(nodes[2], 1);
3110 let events = nodes[2].node.get_and_clear_pending_msg_events();
3111 assert_eq!(events.len(), 1);
3113 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3114 assert!(update_add_htlcs.is_empty());
3115 assert!(!update_fail_htlcs.is_empty());
3116 assert!(update_fulfill_htlcs.is_empty());
3117 assert!(update_fail_malformed_htlcs.is_empty());
3118 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3120 _ => panic!("Unexpected event"),
3122 mine_transaction(&nodes[2], &commitment_tx[0]);
3123 check_closed_broadcast!(nodes[2], true);
3124 check_added_monitors!(nodes[2], 1);
3125 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3126 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3127 assert_eq!(node_txn.len(), 0);
3129 // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3130 // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backward accordingly
3131 mine_transaction(&nodes[1], &commitment_tx[0]);
3132 check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
3133 , [nodes[2].node.get_our_node_id()], 100000);
3134 connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3136 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3137 if nodes[1].connect_style.borrow().skips_blocks() {
3138 assert_eq!(txn.len(), 1);
} else {
3140 assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
}
3142 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3143 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
let timeout_tx = txn.remove(0);
3147 mine_transaction(&nodes[1], &timeout_tx);
3148 check_added_monitors!(nodes[1], 1);
3149 check_closed_broadcast!(nodes[1], true);
3151 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3153 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3154 check_added_monitors!(nodes[1], 1);
3155 let events = nodes[1].node.get_and_clear_pending_msg_events();
3156 assert_eq!(events.len(), 1);
3158 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3159 assert!(update_add_htlcs.is_empty());
3160 assert!(!update_fail_htlcs.is_empty());
3161 assert!(update_fulfill_htlcs.is_empty());
3162 assert!(update_fail_malformed_htlcs.is_empty());
3163 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3165 _ => panic!("Unexpected event"),
3168 // Broadcast legit commitment tx from B on A's chain
3169 let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3170 check_spends!(commitment_tx[0], chan_1.3);
3172 mine_transaction(&nodes[0], &commitment_tx[0]);
3173 connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3175 check_closed_broadcast!(nodes[0], true);
3176 check_added_monitors!(nodes[0], 1);
3177 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3178 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3179 assert_eq!(node_txn.len(), 1);
3180 check_spends!(node_txn[0], commitment_tx[0]);
3181 assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
}
#[test]
3185 fn test_htlc_on_chain_timeout() {
3186 do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3187 do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3188 do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
}
#[test]
3192 fn test_simple_commitment_revoked_fail_backward() {
3193 // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
3194 // and fail backward accordingly.
3196 let chanmon_cfgs = create_chanmon_cfgs(3);
3197 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3198 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3199 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3201 // Create some initial channels
3202 create_announced_chan_between_nodes(&nodes, 0, 1);
3203 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3205 let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3206 // Get the will-be-revoked local txn from nodes[2]
3207 let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3208 // Revoke the old state
3209 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3211 let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3213 mine_transaction(&nodes[1], &revoked_local_txn[0]);
3214 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3215 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3216 check_added_monitors!(nodes[1], 1);
3217 check_closed_broadcast!(nodes[1], true);
3219 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3220 check_added_monitors!(nodes[1], 1);
3221 let events = nodes[1].node.get_and_clear_pending_msg_events();
3222 assert_eq!(events.len(), 1);
3224 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3225 assert!(update_add_htlcs.is_empty());
3226 assert_eq!(update_fail_htlcs.len(), 1);
3227 assert!(update_fulfill_htlcs.is_empty());
3228 assert!(update_fail_malformed_htlcs.is_empty());
3229 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3231 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3232 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3233 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3235 _ => panic!("Unexpected event"),
3239 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3240 // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3241 // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3242 // commitment transaction anymore.
3243 // To do this, we have the peer which will broadcast a revoked commitment transaction send
3244 // a number of update_fail/commitment_signed updates without ever sending the RAA in
3245 // response to our commitment_signed. This is somewhat misbehavior-y, though not
3246 // technically disallowed and we should probably handle it reasonably.
3247 // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3248 // failed/fulfilled backwards must be in at least one of the latest two remote commitment
// transactions:
3250 // * Once we move it out of our holding cell/add it, we will immediately include it in a
3251 // commitment_signed (implying it will be in the latest remote commitment transaction).
3252 // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3253 // and once they revoke the previous commitment transaction (allowing us to send a new
3254 // commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3255 let chanmon_cfgs = create_chanmon_cfgs(3);
3256 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3257 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3258 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3260 // Create some initial channels
3261 create_announced_chan_between_nodes(&nodes, 0, 1);
3262 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3264 let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3265 // Get the will-be-revoked local txn from nodes[2]
3266 let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3267 assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3268 // Revoke the old state
3269 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3271 let value = if use_dust {
3272 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3273 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3274 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3275 .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
} else { 3_000_000 };
3278 let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3279 let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3280 let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3282 nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3283 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3284 check_added_monitors!(nodes[2], 1);
3285 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3286 assert!(updates.update_add_htlcs.is_empty());
3287 assert!(updates.update_fulfill_htlcs.is_empty());
3288 assert!(updates.update_fail_malformed_htlcs.is_empty());
3289 assert_eq!(updates.update_fail_htlcs.len(), 1);
3290 assert!(updates.update_fee.is_none());
3291 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3292 let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3293 // Drop the last RAA from nodes[2] -> nodes[1]; it is only delivered later, in the deliver_bs_raa case
3295 nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3296 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3297 check_added_monitors!(nodes[2], 1);
3298 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3299 assert!(updates.update_add_htlcs.is_empty());
3300 assert!(updates.update_fulfill_htlcs.is_empty());
3301 assert!(updates.update_fail_malformed_htlcs.is_empty());
3302 assert_eq!(updates.update_fail_htlcs.len(), 1);
3303 assert!(updates.update_fee.is_none());
3304 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3305 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3306 check_added_monitors!(nodes[1], 1);
3307 // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3308 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3309 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3310 check_added_monitors!(nodes[2], 1);
3312 nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3313 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3314 check_added_monitors!(nodes[2], 1);
3315 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3316 assert!(updates.update_add_htlcs.is_empty());
3317 assert!(updates.update_fulfill_htlcs.is_empty());
3318 assert!(updates.update_fail_malformed_htlcs.is_empty());
3319 assert_eq!(updates.update_fail_htlcs.len(), 1);
3320 assert!(updates.update_fee.is_none());
3321 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3322 // At this point first_payment_hash has dropped out of the latest two commitment
3323 // transactions that nodes[1] is tracking...
3324 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3325 check_added_monitors!(nodes[1], 1);
3326 // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3327 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3328 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3329 check_added_monitors!(nodes[2], 1);
3331 // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3332 // on nodes[2]'s RAA.
3333 let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3334 nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3335 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3336 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3337 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3338 check_added_monitors!(nodes[1], 0);
3341 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3342 // One monitor for the new revocation preimage, no second one as we won't generate a new
3343 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3344 check_added_monitors!(nodes[1], 1);
3345 let events = nodes[1].node.get_and_clear_pending_events();
3346 assert_eq!(events.len(), 2);
3348 Event::HTLCHandlingFailed { .. } => { },
3349 _ => panic!("Unexpected event"),
3352 Event::PendingHTLCsForwardable { .. } => { },
3353 _ => panic!("Unexpected event"),
3355 // Deliberately don't process the pending fail-back so they all fail back at once after
3356 // block connection just like the !deliver_bs_raa case
3359 let mut failed_htlcs = new_hash_set();
3360 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
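// Now mine the revoked commitment; the channel closes and, after ANTI_REORG_DELAY confirmations,
// nodes[1] fails all three HTLCs back to nodes[0] in a single commitment update.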
3362 mine_transaction(&nodes[1], &revoked_local_txn[0]);
3363 check_added_monitors!(nodes[1], 1);
3364 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3366 let events = nodes[1].node.get_and_clear_pending_events();
3367 assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
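// The fourth payment, which nodes[1] itself originated and which was still pending, fails outright
// when the channel closes (both a PaymentPathFailed and a PaymentFailed event, checked below).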
3368 assert!(events.iter().any(|ev| matches!(
3370 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
3372 assert!(events.iter().any(|ev| matches!(
3374 Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3376 assert!(events.iter().any(|ev| matches!(
3378 Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3381 nodes[1].node.process_pending_htlc_forwards();
3382 check_added_monitors!(nodes[1], 1);
3384 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3385 assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
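// In the deliver_bs_raa case the holding-cell HTLC was freed, so there is one extra message event:
// an update_add destined for nodes[2] (checked below).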
3388 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3389 match nodes_2_event {
3390 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3391 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3392 assert_eq!(update_add_htlcs.len(), 1);
3393 assert!(update_fulfill_htlcs.is_empty());
3394 assert!(update_fail_htlcs.is_empty());
3395 assert!(update_fail_malformed_htlcs.is_empty());
3397 _ => panic!("Unexpected event"),
3401 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3402 match nodes_2_event {
3403 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
3404 assert_eq!(channel_id, chan_2.2);
3405 assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3407 _ => panic!("Unexpected event"),
3410 let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3411 match nodes_0_event {
3412 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3413 assert!(update_add_htlcs.is_empty());
3414 assert_eq!(update_fail_htlcs.len(), 3);
3415 assert!(update_fulfill_htlcs.is_empty());
3416 assert!(update_fail_malformed_htlcs.is_empty());
3417 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3419 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3420 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3421 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3423 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3425 let events = nodes[0].node.get_and_clear_pending_events();
3426 assert_eq!(events.len(), 6);
3428 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3429 assert!(failed_htlcs.insert(payment_hash.0));
3430 // If we delivered B's RAA we got an unknown preimage error, not something
3431 // that we should update our routing table for.
3432 if !deliver_bs_raa {
3433 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3436 _ => panic!("Unexpected event"),
3439 Event::PaymentFailed { ref payment_hash, .. } => {
3440 assert_eq!(*payment_hash, first_payment_hash);
3442 _ => panic!("Unexpected event"),
3445 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3446 assert!(failed_htlcs.insert(payment_hash.0));
3448 _ => panic!("Unexpected event"),
3451 Event::PaymentFailed { ref payment_hash, .. } => {
3452 assert_eq!(*payment_hash, second_payment_hash);
3454 _ => panic!("Unexpected event"),
3457 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3458 assert!(failed_htlcs.insert(payment_hash.0));
3460 _ => panic!("Unexpected event"),
3463 Event::PaymentFailed { ref payment_hash, .. } => {
3464 assert_eq!(*payment_hash, third_payment_hash);
3466 _ => panic!("Unexpected event"),
3469 _ => panic!("Unexpected event"),
3472 // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3474 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3475 _ => panic!("Unexpected event"),
3478 assert!(failed_htlcs.contains(&first_payment_hash.0));
3479 assert!(failed_htlcs.contains(&second_payment_hash.0));
3480 assert!(failed_htlcs.contains(&third_payment_hash.0));
3484 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3485 do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3486 do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3487 do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3488 do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3492 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3493 do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3494 do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3495 do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3496 do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3500 fn fail_backward_pending_htlc_upon_channel_failure() {
3501 let chanmon_cfgs = create_chanmon_cfgs(2);
3502 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3503 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3504 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3505 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3507 // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3509 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3510 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3511 PaymentId(payment_hash.0)).unwrap();
3512 check_added_monitors!(nodes[0], 1);
3514 let payment_event = {
3515 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3516 assert_eq!(events.len(), 1);
3517 SendEvent::from_event(events.remove(0))
3519 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3520 assert_eq!(payment_event.msgs.len(), 1);
3523 // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3524 let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3526 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3527 RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3528 check_added_monitors!(nodes[0], 0);
3530 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3533 // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3535 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
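// Build the onion packet by hand; the normal send_payment path would never produce a 0-msat
// update_add_htlc, which is the malformed message we want nodes[0] to receive.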
3537 let secp_ctx = Secp256k1::new();
3538 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3539 let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
3540 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
3541 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3542 &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None).unwrap();
3543 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3544 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
3546 // Send a 0-msat update_add_htlc to fail the channel.
3547 let update_add_htlc = msgs::UpdateAddHTLC {
3553 onion_routing_packet,
3554 skimmed_fee_msat: None,
3555 blinding_point: None,
3557 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3559 let events = nodes[0].node.get_and_clear_pending_events();
3560 assert_eq!(events.len(), 3);
3561 // Check that Alice fails backward the pending HTLC from the second payment.
3563 Event::PaymentPathFailed { payment_hash, .. } => {
3564 assert_eq!(payment_hash, failed_payment_hash);
3566 _ => panic!("Unexpected event"),
3569 Event::PaymentFailed { payment_hash, .. } => {
3570 assert_eq!(payment_hash, failed_payment_hash);
3572 _ => panic!("Unexpected event"),
3575 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3576 assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3578 _ => panic!("Unexpected event {:?}", events[1]),
3580 check_closed_broadcast!(nodes[0], true);
3581 check_added_monitors!(nodes[0], 1);
3585 fn test_htlc_ignore_latest_remote_commitment() {
3586 // Test that HTLC transactions spending the latest remote commitment transaction are simply
3587 // ignored if we cannot claim them. This originally triggered an invalid unwrap().
3588 let chanmon_cfgs = create_chanmon_cfgs(2);
3589 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3590 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3591 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3592 if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3593 // We rely on the ability to connect a block redundantly, which isn't allowed via
3594 // `chain::Listen`, so we never run the test if we randomly get assigned that connect_style.
3598 let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
3600 route_payment(&nodes[0], &[&nodes[1]], 10000000);
3601 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
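// Connect enough blocks for the HTLC to expire so nodes[0]'s ChannelMonitor broadcasts both its
// commitment transaction and the HTLC-timeout claim spending it.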
3602 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3603 check_closed_broadcast!(nodes[0], true);
3604 check_added_monitors!(nodes[0], 1);
3605 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3607 let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
3608 assert_eq!(node_txn.len(), 2);
3609 check_spends!(node_txn[0], funding_tx);
3610 check_spends!(node_txn[1], node_txn[0]);
3612 let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
3613 connect_block(&nodes[1], &block);
3614 check_closed_broadcast!(nodes[1], true);
3615 check_added_monitors!(nodes[1], 1);
3616 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3618 // Duplicate the connect_block call since this may happen due to other listeners
3619 // registering new transactions
3620 connect_block(&nodes[1], &block);
3624 fn test_force_close_fail_back() {
3625 // Check which HTLCs are failed-backwards on channel force-closure
3626 let chanmon_cfgs = create_chanmon_cfgs(3);
3627 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3628 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3629 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3630 create_announced_chan_between_nodes(&nodes, 0, 1);
3631 create_announced_chan_between_nodes(&nodes, 1, 2);
3633 let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3635 let mut payment_event = {
3636 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3637 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3638 check_added_monitors!(nodes[0], 1);
3640 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3641 assert_eq!(events.len(), 1);
3642 SendEvent::from_event(events.remove(0))
3645 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3646 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3648 expect_pending_htlcs_forwardable!(nodes[1]);
3650 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3651 assert_eq!(events_2.len(), 1);
3652 payment_event = SendEvent::from_event(events_2.remove(0));
3653 assert_eq!(payment_event.msgs.len(), 1);
3655 check_added_monitors!(nodes[1], 1);
3656 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3657 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3658 check_added_monitors!(nodes[2], 1);
3659 let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3661 // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3662 // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3663 // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3665 nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3666 check_closed_broadcast!(nodes[2], true);
3667 check_added_monitors!(nodes[2], 1);
3668 check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3669 let commitment_tx = {
3670 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3671 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3672 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds will
3673 // go back to nodes[1] upon timeout.
3674 assert_eq!(node_txn.len(), 1);
3678 mine_transaction(&nodes[1], &commitment_tx);
3680 // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3681 check_closed_broadcast!(nodes[1], true);
3682 check_added_monitors!(nodes[1], 1);
3683 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3685 // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
3687 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3688 .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3690 mine_transaction(&nodes[2], &commitment_tx);
3691 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
3692 assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
3693 let htlc_tx = node_txn.pop().unwrap();
3694 assert_eq!(htlc_tx.input.len(), 1);
3695 assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
3696 assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
3697 assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
3699 check_spends!(htlc_tx, commitment_tx);
3703 fn test_dup_events_on_peer_disconnect() {
3704 // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3705 // not generate a corresponding duplicative PaymentSent event. This did not use to be the case
3706 // as we used to generate the event immediately upon receipt of the payment preimage in the
3707 // update_fulfill_htlc message.
3709 let chanmon_cfgs = create_chanmon_cfgs(2);
3710 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3711 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3712 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3713 create_announced_chan_between_nodes(&nodes, 0, 1);
3715 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3717 nodes[1].node.claim_funds(payment_preimage);
3718 expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3719 check_added_monitors!(nodes[1], 1);
3720 let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3721 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3722 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
3724 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3725 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3727 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3728 reconnect_args.pending_htlc_claims.0 = 1;
3729 reconnect_nodes(reconnect_args);
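// The reconnect replays the update_fulfill_htlc; nodes[0] must not generate a duplicate
// PaymentSent, only the PaymentPathSuccessful checked below.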
3730 expect_payment_path_successful!(nodes[0]);
3734 fn test_peer_disconnected_before_funding_broadcasted() {
3735 // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3736 // before the funding transaction has been broadcasted and doesn't reconnect within the allowed time.
3737 let chanmon_cfgs = create_chanmon_cfgs(2);
3738 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3739 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3740 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3742 // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3743 // broadcasted, even though it's created by `nodes[0]`.
3744 let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
3745 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3746 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3747 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3748 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3750 let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3751 assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3753 assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3755 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3756 assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3758 // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3759 // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor broadcasted.
3762 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3765 // The peers disconnect before the funding is broadcasted.
3766 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3767 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3769 // The time for peers to reconnect expires.
3770 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
3771 nodes[0].node.timer_tick_occurred();
3774 // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a
3775 // `DiscardFunding` event when the peers are disconnected and do not reconnect before the
3776 // funding transaction is broadcasted.
3777 check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
3778 , [nodes[1].node.get_our_node_id()], 1000000);
3779 check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
3780 , [nodes[0].node.get_our_node_id()], 1000000);
3784 fn test_simple_peer_disconnect() {
3785 // Test that we can reconnect when there are no lost messages
3786 let chanmon_cfgs = create_chanmon_cfgs(3);
3787 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3788 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3789 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3790 create_announced_chan_between_nodes(&nodes, 0, 1);
3791 create_announced_chan_between_nodes(&nodes, 1, 2);
3793 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3794 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3795 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3796 reconnect_args.send_channel_ready = (true, true);
3797 reconnect_nodes(reconnect_args);
3799 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3800 let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3801 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3802 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3804 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3805 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3806 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3808 let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3809 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3810 let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3811 let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3813 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3814 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3816 claim_payment_along_route(
3817 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3)
3820 fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
3822 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3823 reconnect_args.pending_cell_htlc_fails.0 = 1;
3824 reconnect_args.pending_cell_htlc_claims.0 = 1;
3825 reconnect_nodes(reconnect_args);
3827 let events = nodes[0].node.get_and_clear_pending_events();
3828 assert_eq!(events.len(), 4);
3830 Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3831 assert_eq!(payment_preimage, payment_preimage_3);
3832 assert_eq!(payment_hash, payment_hash_3);
3834 _ => panic!("Unexpected event"),
3837 Event::PaymentPathSuccessful { .. } => {},
3838 _ => panic!("Unexpected event"),
3841 Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3842 assert_eq!(payment_hash, payment_hash_5);
3843 assert!(payment_failed_permanently);
3845 _ => panic!("Unexpected event"),
3848 Event::PaymentFailed { payment_hash, .. } => {
3849 assert_eq!(payment_hash, payment_hash_5);
3851 _ => panic!("Unexpected event"),
3854 check_added_monitors(&nodes[0], 1);
3856 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3857 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3860 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3861 // Test that we can reconnect when in-flight HTLC updates get dropped
3862 let chanmon_cfgs = create_chanmon_cfgs(2);
3863 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3864 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3865 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3867 let mut as_channel_ready = None;
3868 let channel_id = if messages_delivered == 0 {
3869 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3870 as_channel_ready = Some(channel_ready);
3871 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3872 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3873 // it before the channel_reestablish message.
3876 create_announced_chan_between_nodes(&nodes, 0, 1).2
3879 let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3881 let payment_event = {
3882 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3883 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3884 check_added_monitors!(nodes[0], 1);
3886 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3887 assert_eq!(events.len(), 1);
3888 SendEvent::from_event(events.remove(0))
3890 assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
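// Depending on messages_delivered, deliver none, some, or all of the HTLC add/commitment dance
// before disconnecting, then check that the undelivered messages are retransmitted on reconnect.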
3892 if messages_delivered < 2 {
3893 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3895 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3896 if messages_delivered >= 3 {
3897 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3898 check_added_monitors!(nodes[1], 1);
3899 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3901 if messages_delivered >= 4 {
3902 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3903 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3904 check_added_monitors!(nodes[0], 1);
3906 if messages_delivered >= 5 {
3907 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3908 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3909 // No commitment_signed so get_event_msg's assert(len == 1) passes
3910 check_added_monitors!(nodes[0], 1);
3912 if messages_delivered >= 6 {
3913 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3914 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3915 check_added_monitors!(nodes[1], 1);
3922 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3923 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3924 if messages_delivered < 3 {
3925 if simulate_broken_lnd {
3926 // lnd has a long-standing bug where they send a channel_ready prior to a
3927 // channel_reestablish if you reconnect prior to channel_ready time.
3929 // Here we simulate that behavior, delivering a channel_ready immediately on
3930 // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3931 // in `reconnect_nodes` but we currently don't fail based on that.
3933 // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3934 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3936 // Even if the channel_ready messages get exchanged, as long as nothing further was
3937 // received on either side, both sides will need to resend them.
3938 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3939 reconnect_args.send_channel_ready = (true, true);
3940 reconnect_args.pending_htlc_adds.1 = 1;
3941 reconnect_nodes(reconnect_args);
3942 } else if messages_delivered == 3 {
3943 // nodes[0] still wants its RAA + commitment_signed
3944 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3945 reconnect_args.pending_responding_commitment_signed.0 = true;
3946 reconnect_args.pending_raa.0 = true;
3947 reconnect_nodes(reconnect_args);
3948 } else if messages_delivered == 4 {
3949 // nodes[0] still wants its commitment_signed
3950 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3951 reconnect_args.pending_responding_commitment_signed.0 = true;
3952 reconnect_nodes(reconnect_args);
3953 } else if messages_delivered == 5 {
3954 // nodes[1] still wants its final RAA
3955 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3956 reconnect_args.pending_raa.1 = true;
3957 reconnect_nodes(reconnect_args);
3958 } else if messages_delivered == 6 {
3959 // Everything was delivered...
3960 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3963 let events_1 = nodes[1].node.get_and_clear_pending_events();
3964 if messages_delivered == 0 {
3965 assert_eq!(events_1.len(), 2);
3967 Event::ChannelReady { .. } => { },
3968 _ => panic!("Unexpected event"),
3971 Event::PendingHTLCsForwardable { .. } => { },
3972 _ => panic!("Unexpected event"),
3975 assert_eq!(events_1.len(), 1);
3977 Event::PendingHTLCsForwardable { .. } => { },
3978 _ => panic!("Unexpected event"),
3982 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3983 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3984 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3986 nodes[1].node.process_pending_htlc_forwards();
3988 let events_2 = nodes[1].node.get_and_clear_pending_events();
3989 assert_eq!(events_2.len(), 1);
3991 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3992 assert_eq!(payment_hash_1, *payment_hash);
3993 assert_eq!(amount_msat, 1_000_000);
3994 assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3995 assert_eq!(via_channel_id, Some(channel_id));
3997 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
3998 assert!(payment_preimage.is_none());
3999 assert_eq!(payment_secret_1, *payment_secret);
4001 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4004 _ => panic!("Unexpected event"),
4007 nodes[1].node.claim_funds(payment_preimage_1);
4008 check_added_monitors!(nodes[1], 1);
4009 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4011 let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
4012 assert_eq!(events_3.len(), 1);
4013 let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
4014 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
4015 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4016 assert!(updates.update_add_htlcs.is_empty());
4017 assert!(updates.update_fail_htlcs.is_empty());
4018 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4019 assert!(updates.update_fail_malformed_htlcs.is_empty());
4020 assert!(updates.update_fee.is_none());
4021 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
4023 _ => panic!("Unexpected event"),
4026 if messages_delivered >= 1 {
4027 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
4029 let events_4 = nodes[0].node.get_and_clear_pending_events();
4030 assert_eq!(events_4.len(), 1);
4032 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4033 assert_eq!(payment_preimage_1, *payment_preimage);
4034 assert_eq!(payment_hash_1, *payment_hash);
4036 _ => panic!("Unexpected event"),
4039 if messages_delivered >= 2 {
4040 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
4041 check_added_monitors!(nodes[0], 1);
4042 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4044 if messages_delivered >= 3 {
4045 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4046 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4047 check_added_monitors!(nodes[1], 1);
4049 if messages_delivered >= 4 {
4050 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
4051 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4052 // No commitment_signed so get_event_msg's assert(len == 1) passes
4053 check_added_monitors!(nodes[1], 1);
4055 if messages_delivered >= 5 {
4056 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4057 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4058 check_added_monitors!(nodes[0], 1);
4065 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4066 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4067 if messages_delivered < 2 {
4068 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4069 reconnect_args.pending_htlc_claims.0 = 1;
4070 reconnect_nodes(reconnect_args);
4071 if messages_delivered < 1 {
4072 expect_payment_sent!(nodes[0], payment_preimage_1);
4074 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4076 } else if messages_delivered == 2 {
4077 // nodes[0] still wants its RAA + commitment_signed
4078 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4079 reconnect_args.pending_responding_commitment_signed.1 = true;
4080 reconnect_args.pending_raa.1 = true;
4081 reconnect_nodes(reconnect_args);
4082 } else if messages_delivered == 3 {
4083 // nodes[0] still wants its commitment_signed
4084 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4085 reconnect_args.pending_responding_commitment_signed.1 = true;
4086 reconnect_nodes(reconnect_args);
4087 } else if messages_delivered == 4 {
4088 // nodes[1] still wants its final RAA
4089 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4090 reconnect_args.pending_raa.0 = true;
4091 reconnect_nodes(reconnect_args);
4092 } else if messages_delivered == 5 {
4093 // Everything was delivered...
4094 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4097 if messages_delivered == 1 || messages_delivered == 2 {
4098 expect_payment_path_successful!(nodes[0]);
4100 if messages_delivered <= 5 {
4101 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4102 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4104 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4106 if messages_delivered > 2 {
4107 expect_payment_path_successful!(nodes[0]);
4110 // Channel should still work fine...
4111 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4112 let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
4113 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4117 fn test_drop_messages_peer_disconnect_a() {
4118 do_test_drop_messages_peer_disconnect(0, true);
4119 do_test_drop_messages_peer_disconnect(0, false);
4120 do_test_drop_messages_peer_disconnect(1, false);
4121 do_test_drop_messages_peer_disconnect(2, false);
4125 fn test_drop_messages_peer_disconnect_b() {
4126 do_test_drop_messages_peer_disconnect(3, false);
4127 do_test_drop_messages_peer_disconnect(4, false);
4128 do_test_drop_messages_peer_disconnect(5, false);
4129 do_test_drop_messages_peer_disconnect(6, false);
4133 fn test_channel_ready_without_best_block_updated() {
4134 // Previously, if we were offline when a funding transaction was locked in and then came
4135 // back online and called best_block_updated once followed by transactions_confirmed, we'd not
4136 // generate a channel_ready until a later best_block_updated. This tests that we generate the
4137 // channel_ready immediately instead.
4138 let chanmon_cfgs = create_chanmon_cfgs(2);
4139 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4140 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4141 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4142 *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
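// Emulate coming back online after the funding transaction confirmed while we were offline:
// advance the chain tip first, then deliver the confirmation via transactions_confirmed below.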
4144 let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4146 let conf_height = nodes[0].best_block_info().1 + 1;
4147 connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4148 let block_txn = [funding_tx];
4149 let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
4150 let conf_block_header = nodes[0].get_block_header(conf_height);
4151 nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
4153 // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4154 let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4155 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4159 fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
4160 let chanmon_cfgs = create_chanmon_cfgs(2);
4161 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4162 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4163 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4165 // Let channel_manager get ahead of chain_monitor by 1 block.
4166 // This emulates a race condition where a newly added channel_monitor skips processing 1 block,
4167 // in the case where the client calls block_connect on channel_manager first and then on chain_monitor.
4168 let height_1 = nodes[0].best_block_info().1 + 1;
4169 let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4171 nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4172 nodes[0].node.block_connected(&block_1, height_1);
4174 // Create channel, and it gets added to chain_monitor in funding_created.
4175 let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4177 // Now, newly added channel_monitor in chain_monitor hasn't processed block_1,
4178 // but its best_block is block_1, since that was populated by channel_manager, and channel_manager
4179 // was running ahead of chain_monitor at the time of funding_created.
4180 // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4181 // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4182 confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4183 connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4185 // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4186 let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4187 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4191 fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
4192 let chanmon_cfgs = create_chanmon_cfgs(2);
4193 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4194 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4195 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4197 // Let chain_monitor get ahead of channel_manager by 1 block.
4198 // This emulates a race condition where a newly added channel_monitor skips processing 1 block,
4199 // in the case where the client calls block_connect on chain_monitor first and then on channel_manager.
4200 let height_1 = nodes[0].best_block_info().1 + 1;
4201 let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4203 nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4204 nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
4206 // Create channel, and it gets added to chain_monitor in funding_created.
4207 let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4209 // channel_manager can't really skip block_1; it should get it eventually.
4210 nodes[0].node.block_connected(&block_1, height_1);
4212 // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block is
4213 // the block before block_1, since that was populated by channel_manager, and channel_manager was
4214 // running behind at the time of funding_created.
4215 // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4216 // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4217 confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4218 connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4220 // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4221 let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4222 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4226 fn test_drop_messages_peer_disconnect_dual_htlc() {
4227 // Test that we can handle reconnecting when both sides of a channel have pending
4228 // commitment_updates when we disconnect.
4229 let chanmon_cfgs = create_chanmon_cfgs(2);
4230 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4231 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4232 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4233 create_announced_chan_between_nodes(&nodes, 0, 1);
4235 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4237 // Now send a second payment whose update_add_htlc won't be delivered before the disconnect
4238 let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4239 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4240 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4241 check_added_monitors!(nodes[0], 1);
4243 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4244 assert_eq!(events_1.len(), 1);
4246 MessageSendEvent::UpdateHTLCs { .. } => {},
4247 _ => panic!("Unexpected event"),
4250 nodes[1].node.claim_funds(payment_preimage_1);
4251 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4252 check_added_monitors!(nodes[1], 1);
4254 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4255 assert_eq!(events_2.len(), 1);
4257 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4258 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4259 assert!(update_add_htlcs.is_empty());
4260 assert_eq!(update_fulfill_htlcs.len(), 1);
4261 assert!(update_fail_htlcs.is_empty());
4262 assert!(update_fail_malformed_htlcs.is_empty());
4263 assert!(update_fee.is_none());
4265 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4266 let events_3 = nodes[0].node.get_and_clear_pending_events();
4267 assert_eq!(events_3.len(), 1);
4269 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4270 assert_eq!(*payment_preimage, payment_preimage_1);
4271 assert_eq!(*payment_hash, payment_hash_1);
4273 _ => panic!("Unexpected event"),
4276 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4277 let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4278 // No commitment_signed so get_event_msg's assert(len == 1) passes
4279 check_added_monitors!(nodes[0], 1);
4281 _ => panic!("Unexpected event"),
4284 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4285 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4287 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4288 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4290 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4291 assert_eq!(reestablish_1.len(), 1);
4292 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4293 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4295 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4296 assert_eq!(reestablish_2.len(), 1);
4298 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4299 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4300 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4301 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4303 assert!(as_resp.0.is_none());
4304 assert!(bs_resp.0.is_none());
4306 assert!(bs_resp.1.is_none());
4307 assert!(bs_resp.2.is_none());
4309 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4311 assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4312 assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4313 assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4314 assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4315 assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4316 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4317 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4318 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4319 // No commitment_signed so get_event_msg's assert(len == 1) passes
4320 check_added_monitors!(nodes[1], 1);
4322 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4323 let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4324 assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4325 assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4326 assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4327 assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4328 assert!(bs_second_commitment_signed.update_fee.is_none());
4329 check_added_monitors!(nodes[1], 1);
4331 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4332 let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4333 assert!(as_commitment_signed.update_add_htlcs.is_empty());
4334 assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4335 assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4336 assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4337 assert!(as_commitment_signed.update_fee.is_none());
4338 check_added_monitors!(nodes[0], 1);
4340 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4341 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4342 // No commitment_signed so get_event_msg's assert(len == 1) passes
4343 check_added_monitors!(nodes[0], 1);
4345 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4346 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4347 // No commitment_signed so get_event_msg's assert(len == 1) passes
4348 check_added_monitors!(nodes[1], 1);
4350 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4351 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4352 check_added_monitors!(nodes[1], 1);
4354 expect_pending_htlcs_forwardable!(nodes[1]);
4356 let events_5 = nodes[1].node.get_and_clear_pending_events();
4357 assert_eq!(events_5.len(), 1);
4359 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4360 assert_eq!(payment_hash_2, *payment_hash);
4362 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
4363 assert!(payment_preimage.is_none());
4364 assert_eq!(payment_secret_2, *payment_secret);
4366 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4369 _ => panic!("Unexpected event"),
4372 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4373 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4374 check_added_monitors!(nodes[0], 1);
4376 expect_payment_path_successful!(nodes[0]);
4377 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4380 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4381 // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4382 // to avoid our counterparty failing the channel.
4383 let chanmon_cfgs = create_chanmon_cfgs(2);
4384 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4385 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4386 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4388 create_announced_chan_between_nodes(&nodes, 0, 1);
4390 let our_payment_hash = if send_partial_mpp {
4391 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4392 // Use the utility function send_payment_along_path to send the payment with MPP data which
4393 // indicates there are more HTLCs coming.
4394 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4395 let payment_id = PaymentId([42; 32]);
4396 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4397 RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4398 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4399 RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4400 &None, session_privs[0]).unwrap();
4401 check_added_monitors!(nodes[0], 1);
4402 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4403 assert_eq!(events.len(), 1);
4404 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4405 // hop should *not* yet generate any PaymentClaimable event(s).
4406 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4409 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4412 let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4413 connect_block(&nodes[0], &block);
4414 connect_block(&nodes[1], &block);
4415 let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4416 for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4417 block.header.prev_blockhash = block.block_hash();
4418 connect_block(&nodes[0], &block);
4419 connect_block(&nodes[1], &block);
4422 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4424 check_added_monitors!(nodes[1], 1);
4425 let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4426 assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4427 assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4428 assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4429 assert!(htlc_timeout_updates.update_fee.is_none());
4431 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4432 commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4433 // 100_000 msat as u64, followed by the height at which we failed back above
4434 let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4435 expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
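// 0x4000 | 15 is the BOLT 4 `incorrect_or_unknown_payment_details` failure code, whose failure
// data is the HTLC amount followed by the current block height, as built above.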
4436 expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4440 fn test_htlc_timeout() {
4441 do_test_htlc_timeout(true);
4442 do_test_htlc_timeout(false);
4445 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4446 // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4447 let chanmon_cfgs = create_chanmon_cfgs(3);
4448 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4449 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4450 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4451 create_announced_chan_between_nodes(&nodes, 0, 1);
4452 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4454 // Make sure all nodes are at the same starting height
4455 connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4456 connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4457 connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4459 // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4460 let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4461 nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4462 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4463 assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4464 check_added_monitors!(nodes[1], 1);
4466 // Now attempt to route a second payment, which should be placed in the holding cell
4467 let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4468 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4469 sending_node.node.send_payment_with_route(&route, second_payment_hash,
4470 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4472 check_added_monitors!(nodes[0], 1);
4473 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4474 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4475 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4476 expect_pending_htlcs_forwardable!(nodes[1]);
4478 check_added_monitors!(nodes[1], 0);
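// Either way, the second HTLC now sits in nodes[1]'s holding cell: the 1 -> 2 channel is still
// awaiting an RAA for the first payment, so no commitment update (and no monitor update) yet.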
4480 connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4481 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4482 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
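// One more block pushes the held HTLC past its fail-back deadline and nodes[1] fails it backwards.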
4483 connect_blocks(&nodes[1], 1);
4486 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4487 check_added_monitors!(nodes[1], 1);
4488 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4489 assert_eq!(fail_commit.len(), 1);
4490 match fail_commit[0] {
4491 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4492 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4493 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4495 _ => unreachable!(),
4497 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4499 expect_payment_failed!(nodes[1], second_payment_hash, false);
4504 fn test_holding_cell_htlc_add_timeouts() {
4505 do_test_holding_cell_htlc_add_timeouts(false);
4506 do_test_holding_cell_htlc_add_timeouts(true);
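// Helper: drains SpendableOutputs events from a node's ChainMonitor and asks the keys manager to
// build spending transactions for each output (and, when there are several, one spending them all
// together), returning the transactions for the caller to inspect.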
4509 macro_rules! check_spendable_outputs {
4510 ($node: expr, $keysinterface: expr) => {
4512 let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4513 let mut txn = Vec::new();
4514 let mut all_outputs = Vec::new();
4515 let secp_ctx = Secp256k1::new();
4516 for event in events.drain(..) {
4518 Event::SpendableOutputs { mut outputs, channel_id: _ } => {
4519 for outp in outputs.drain(..) {
4520 txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4521 all_outputs.push(outp);
4524 _ => panic!("Unexpected event"),
4527 if all_outputs.len() > 1 {
4528 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4538 fn test_claim_sizeable_push_msat() {
4539 // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
4540 let chanmon_cfgs = create_chanmon_cfgs(2);
4541 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4542 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4543 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4545 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4546 nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4547 check_closed_broadcast!(nodes[1], true);
4548 check_added_monitors!(nodes[1], 1);
4549 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
4550 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4551 assert_eq!(node_txn.len(), 1);
4552 check_spends!(node_txn[0], chan.3);
4553 assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4555 mine_transaction(&nodes[1], &node_txn[0]);
4556 connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
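// The to_local output on our own commitment tx is CSV-locked for BREAKDOWN_TIMEOUT blocks here,
// so the monitor only surfaces it as spendable once that delay has passed; the sweep below must
// also set its input's nSequence to that delay to satisfy the script.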
4558 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4559 assert_eq!(spend_txn.len(), 1);
4560 assert_eq!(spend_txn[0].input.len(), 1);
4561 check_spends!(spend_txn[0], node_txn[0]);
4562 assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4566 fn test_claim_on_remote_sizeable_push_msat() {
4567 // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4568 // to_remote output is encumbered by a P2WPKH
4569 let chanmon_cfgs = create_chanmon_cfgs(2);
4570 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4571 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4572 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4574 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4575 nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4576 check_closed_broadcast!(nodes[0], true);
4577 check_added_monitors!(nodes[0], 1);
4578 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
4580 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4581 assert_eq!(node_txn.len(), 1);
4582 check_spends!(node_txn[0], chan.3);
4583 assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4585 mine_transaction(&nodes[1], &node_txn[0]);
4586 check_closed_broadcast!(nodes[1], true);
4587 check_added_monitors!(nodes[1], 1);
4588 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4589 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
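// The to_remote output on the counterparty's commitment tx isn't CSV-locked, but the monitor
// still waits ANTI_REORG_DELAY confirmations before handing back a SpendableOutputs event for it.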
4591 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4592 assert_eq!(spend_txn.len(), 1);
4593 check_spends!(spend_txn[0], node_txn[0]);
4597 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4598 // Same test as the previous one, but on the remote *revoked* commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4599 // to_remote output is encumbered by a P2WPKH
4601 let chanmon_cfgs = create_chanmon_cfgs(2);
4602 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4603 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4604 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4606 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4607 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4608 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4609 assert_eq!(revoked_local_txn[0].input.len(), 1);
4610 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4612 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4613 mine_transaction(&nodes[1], &revoked_local_txn[0]);
4614 check_closed_broadcast!(nodes[1], true);
4615 check_added_monitors!(nodes[1], 1);
4616 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4618 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4619 mine_transaction(&nodes[1], &node_txn[0]);
4620 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4622 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4623 assert_eq!(spend_txn.len(), 3);
4624 check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4625 check_spends!(spend_txn[1], node_txn[0]);
4626 check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4630 fn test_static_spendable_outputs_preimage_tx() {
4631 let chanmon_cfgs = create_chanmon_cfgs(2);
4632 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4633 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4634 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4636 // Create some initial channels
4637 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4639 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4641 let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4642 assert_eq!(commitment_tx[0].input.len(), 1);
4643 assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4645 // Settle A's commitment tx on B's chain
4646 nodes[1].node.claim_funds(payment_preimage);
4647 expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4648 check_added_monitors!(nodes[1], 1);
4649 mine_transaction(&nodes[1], &commitment_tx[0]);
4650 check_added_monitors!(nodes[1], 1);
4651 let events = nodes[1].node.get_and_clear_pending_msg_events();
4653 MessageSendEvent::UpdateHTLCs { .. } => {},
4654 _ => panic!("Unexpected event"),
4657 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4658 _ => panic!("Unexpected event"),
4661 // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4662 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4663 assert_eq!(node_txn.len(), 1);
4664 check_spends!(node_txn[0], commitment_tx[0]);
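// The HTLC was offered by A, so B's preimage claim spends the offered-HTLC script on A's
// commitment tx, hence the OFFERED_HTLC_SCRIPT_WEIGHT witness check below.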
4665 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4667 mine_transaction(&nodes[1], &node_txn[0]);
4668 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4669 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4671 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4672 assert_eq!(spend_txn.len(), 1);
4673 check_spends!(spend_txn[0], node_txn[0]);
4677 fn test_static_spendable_outputs_timeout_tx() {
4678 let chanmon_cfgs = create_chanmon_cfgs(2);
4679 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4680 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4681 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4683 // Create some initial channels
4684 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4686 // Rebalance the channel a bit by sending one payment from A to B ...
4687 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4689 let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4691 let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4692 assert_eq!(commitment_tx[0].input.len(), 1);
4693 assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4695 // Settle A's commitment tx on B's chain
4696 mine_transaction(&nodes[1], &commitment_tx[0]);
4697 check_added_monitors!(nodes[1], 1);
4698 let events = nodes[1].node.get_and_clear_pending_msg_events();
4700 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4701 _ => panic!("Unexpected event"),
4703 connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4705 // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4706 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4707 assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4708 check_spends!(node_txn[0], commitment_tx[0].clone());
4709 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4711 mine_transaction(&nodes[1], &node_txn[0]);
4712 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4713 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4714 expect_payment_failed!(nodes[1], our_payment_hash, false);
4716 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4717 assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output, and one tx spending both
4718 check_spends!(spend_txn[0], commitment_tx[0]);
4719 check_spends!(spend_txn[1], node_txn[0]);
4720 check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4724 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4725 let chanmon_cfgs = create_chanmon_cfgs(2);
4726 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4727 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4728 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4730 // Create some initial channels
4731 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4733 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4734 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4735 assert_eq!(revoked_local_txn[0].input.len(), 1);
4736 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4738 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
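// Claiming the payment moves the channel to a new commitment state, revoking the transaction we
// captured above; broadcasting it below is therefore a breach that nodes[1] punishes with a
// justice transaction.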
4740 mine_transaction(&nodes[1], &revoked_local_txn[0]);
4741 check_closed_broadcast!(nodes[1], true);
4742 check_added_monitors!(nodes[1], 1);
4743 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4745 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4746 assert_eq!(node_txn.len(), 1);
4747 assert_eq!(node_txn[0].input.len(), 2);
4748 check_spends!(node_txn[0], revoked_local_txn[0]);
4750 mine_transaction(&nodes[1], &node_txn[0]);
4751 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4753 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4754 assert_eq!(spend_txn.len(), 1);
4755 check_spends!(spend_txn[0], node_txn[0]);
4759 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4760 let mut chanmon_cfgs = create_chanmon_cfgs(2);
4761 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4762 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4763 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4764 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4766 // Create some initial channels
4767 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4769 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4770 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4771 assert_eq!(revoked_local_txn[0].input.len(), 1);
4772 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4774 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4776 // A will generate HTLC-Timeout from revoked commitment tx
4777 mine_transaction(&nodes[0], &revoked_local_txn[0]);
4778 check_closed_broadcast!(nodes[0], true);
4779 check_added_monitors!(nodes[0], 1);
4780 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4781 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4783 let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4784 assert_eq!(revoked_htlc_txn.len(), 1);
4785 assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4786 assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4787 check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4788 assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout
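// A non-zero locktime marks this as an HTLC-Timeout claim; HTLC-Success claims use a locktime of zero.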
4790 // B will generate justice tx from A's revoked commitment/HTLC tx
4791 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4792 check_closed_broadcast!(nodes[1], true);
4793 check_added_monitors!(nodes[1], 1);
4794 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4796 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4797 assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4798 // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4799 // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4800 // transactions next...
4801 assert_eq!(node_txn[0].input.len(), 3);
4802 check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4804 assert_eq!(node_txn[1].input.len(), 2);
4805 check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4806 if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4807 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4809 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4810 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4813 mine_transaction(&nodes[1], &node_txn[1]);
4814 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4816 // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4817 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4818 assert_eq!(spend_txn.len(), 1);
4819 assert_eq!(spend_txn[0].input.len(), 1);
4820 check_spends!(spend_txn[0], node_txn[1]);
4824 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4825 let mut chanmon_cfgs = create_chanmon_cfgs(2);
4826 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4827 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4828 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4829 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4831 // Create some initial channels
4832 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4834 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4835 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4836 assert_eq!(revoked_local_txn[0].input.len(), 1);
4837 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4839 // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4840 assert_eq!(revoked_local_txn[0].output.len(), 2);
4842 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4844 // B will generate HTLC-Success from revoked commitment tx
4845 mine_transaction(&nodes[1], &revoked_local_txn[0]);
4846 check_closed_broadcast!(nodes[1], true);
4847 check_added_monitors!(nodes[1], 1);
4848 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4849 let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4851 assert_eq!(revoked_htlc_txn.len(), 1);
4852 assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4853 assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4854 check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4856 // Check that the unspent (of the two) output on revoked_local_txn[0] is a P2WPKH:
4857 let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
4858 assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
4860 // A will generate justice tx from B's revoked commitment/HTLC tx
4861 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4862 check_closed_broadcast!(nodes[0], true);
4863 check_added_monitors!(nodes[0], 1);
4864 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4866 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4867 assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4869 // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4870 // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4871 // transactions next...
4872 assert_eq!(node_txn[0].input.len(), 2);
4873 check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4874 if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4875 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4877 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4878 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4881 assert_eq!(node_txn[1].input.len(), 1);
4882 check_spends!(node_txn[1], revoked_htlc_txn[0]);
4884 mine_transaction(&nodes[0], &node_txn[1]);
4885 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4887 // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
4888 // didn't try to generate any new transactions.
4890 // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4891 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4892 assert_eq!(spend_txn.len(), 3);
4893 assert_eq!(spend_txn[0].input.len(), 1);
4894 check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4895 assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4896 check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4897 check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4901 fn test_onchain_to_onchain_claim() {
4902 // Test that, in case of channel closure, we detect the state of the output and claim the HTLC
4903 // on the downstream peer's remote commitment tx.
4904 // First, have C claim an HTLC against its own latest commitment transaction.
4905 // Then, broadcast these to B, which should update the monitor downstream on the A<->B channel.
4907 // Finally, check that B will claim the HTLC output if A's latest commitment transaction is broadcast.
4910 let chanmon_cfgs = create_chanmon_cfgs(3);
4911 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4912 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4913 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4915 // Create some initial channels
4916 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4917 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4919 // Ensure all nodes are at the same height
4920 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4921 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4922 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4923 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4925 // Rebalance the network a bit by relaying one payment through all the channels ...
4926 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4927 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4929 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4930 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4931 check_spends!(commitment_tx[0], chan_2.3);
4932 nodes[2].node.claim_funds(payment_preimage);
4933 expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4934 check_added_monitors!(nodes[2], 1);
4935 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4936 assert!(updates.update_add_htlcs.is_empty());
4937 assert!(updates.update_fail_htlcs.is_empty());
4938 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4939 assert!(updates.update_fail_malformed_htlcs.is_empty());
4941 mine_transaction(&nodes[2], &commitment_tx[0]);
4942 check_closed_broadcast!(nodes[2], true);
4943 check_added_monitors!(nodes[2], 1);
4944 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4946 let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4947 assert_eq!(c_txn.len(), 1);
4948 check_spends!(c_txn[0], commitment_tx[0]);
4949 assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4950 assert!(c_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
4951 assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx
4953 // Now broadcast C's commitment tx and HTLC-Success tx on B's chain; B should successfully extract the preimage and update the downstream monitor
4954 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4955 check_added_monitors!(nodes[1], 1);
4956 let events = nodes[1].node.get_and_clear_pending_events();
4957 assert_eq!(events.len(), 2);
4959 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4960 _ => panic!("Unexpected event"),
4963 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
4964 next_channel_id, outbound_amount_forwarded_msat, ..
4966 assert_eq!(total_fee_earned_msat, Some(1000));
4967 assert_eq!(prev_channel_id, Some(chan_1.2));
4968 assert_eq!(claim_from_onchain_tx, true);
4969 assert_eq!(next_channel_id, Some(chan_2.2));
4970 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4972 _ => panic!("Unexpected event"),
4974 check_added_monitors!(nodes[1], 1);
4975 let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4976 assert_eq!(msg_events.len(), 3);
4977 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4978 let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4980 match nodes_2_event {
4981 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
4982 _ => panic!("Unexpected event"),
4985 match nodes_0_event {
4986 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4987 assert!(update_add_htlcs.is_empty());
4988 assert!(update_fail_htlcs.is_empty());
4989 assert_eq!(update_fulfill_htlcs.len(), 1);
4990 assert!(update_fail_malformed_htlcs.is_empty());
4991 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4993 _ => panic!("Unexpected event"),
4996 // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4997 match msg_events[0] {
4998 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4999 _ => panic!("Unexpected event"),
5002 // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
5003 let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5004 mine_transaction(&nodes[1], &commitment_tx[0]);
5005 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5006 let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5007 // ChannelMonitor: HTLC-Success tx
5008 assert_eq!(b_txn.len(), 1);
5009 check_spends!(b_txn[0], commitment_tx[0]);
5010 assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5011 assert!(b_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment
5012 assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
5014 check_closed_broadcast!(nodes[1], true);
5015 check_added_monitors!(nodes[1], 1);
5019 fn test_duplicate_payment_hash_one_failure_one_success() {
5020 // Topology : A --> B --> C --> D
5021 // We route two payments with the same hash between B and C; one will time out and the other will be successfully claimed
5022 // Note that because C will refuse to generate two payment secrets for the same payment hash,
5023 // we forward one of the payments onwards to D.
5024 let chanmon_cfgs = create_chanmon_cfgs(4);
5025 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
5026 // When this test was written, the default base fee floated based on the HTLC count.
5027 // It is now fixed, so we simply set the fee to the expected value here.
5028 let mut config = test_default_channel_config();
5029 config.channel_config.forwarding_fee_base_msat = 196;
5030 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
5031 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5032 let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
5034 create_announced_chan_between_nodes(&nodes, 0, 1);
5035 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5036 create_announced_chan_between_nodes(&nodes, 2, 3);
5038 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5039 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5040 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5041 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5042 connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
5044 let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
5046 let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
5047 // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
5048 // script push size limit so that the below script length checks match
5049 // ACCEPTED_HTLC_SCRIPT_WEIGHT.
5050 let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
5051 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
5052 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
5053 send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
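// The B <-> C channel now carries two HTLCs with the same payment hash: the 900 sat payment C
// received above and the 800 sat payment forwarded on to D.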
5055 let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
5056 assert_eq!(commitment_txn[0].input.len(), 1);
5057 check_spends!(commitment_txn[0], chan_2.3);
5059 mine_transaction(&nodes[1], &commitment_txn[0]);
5060 check_closed_broadcast!(nodes[1], true);
5061 check_added_monitors!(nodes[1], 1);
5062 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
5063 connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
5065 let htlc_timeout_tx;
5066 { // Extract one of the two HTLC-Timeout transactions
5067 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5068 // ChannelMonitor: timeout tx * 2-or-3
5069 assert!(node_txn.len() == 2 || node_txn.len() == 3);
5071 check_spends!(node_txn[0], commitment_txn[0]);
5072 assert_eq!(node_txn[0].input.len(), 1);
5073 assert_eq!(node_txn[0].output.len(), 1);
5075 if node_txn.len() > 2 {
5076 check_spends!(node_txn[1], commitment_txn[0]);
5077 assert_eq!(node_txn[1].input.len(), 1);
5078 assert_eq!(node_txn[1].output.len(), 1);
5079 assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5081 check_spends!(node_txn[2], commitment_txn[0]);
5082 assert_eq!(node_txn[2].input.len(), 1);
5083 assert_eq!(node_txn[2].output.len(), 1);
5084 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
5086 check_spends!(node_txn[1], commitment_txn[0]);
5087 assert_eq!(node_txn[1].input.len(), 1);
5088 assert_eq!(node_txn[1].output.len(), 1);
5089 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5092 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5093 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5094 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
5095 // (with value 900 sats) will be claimed in the below `claim_funds` call.
5096 if node_txn.len() > 2 {
5097 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5098 htlc_timeout_tx = if node_txn[2].output[0].value.to_sat() < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
5100 htlc_timeout_tx = if node_txn[0].output[0].value.to_sat() < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
5104 nodes[2].node.claim_funds(our_payment_preimage);
5105 expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
5107 mine_transaction(&nodes[2], &commitment_txn[0]);
5108 check_added_monitors!(nodes[2], 2);
5109 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5110 let events = nodes[2].node.get_and_clear_pending_msg_events();
5112 MessageSendEvent::UpdateHTLCs { .. } => {},
5113 _ => panic!("Unexpected event"),
5116 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5117 _ => panic!("Unexpected event"),
5119 let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5120 assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
5121 check_spends!(htlc_success_txn[0], commitment_txn[0]);
5122 check_spends!(htlc_success_txn[1], commitment_txn[0]);
5123 assert_eq!(htlc_success_txn[0].input.len(), 1);
5124 assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5125 assert_eq!(htlc_success_txn[1].input.len(), 1);
5126 assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5127 assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5128 assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
5130 mine_transaction(&nodes[1], &htlc_timeout_tx);
5131 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5132 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
5133 let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5134 assert!(htlc_updates.update_add_htlcs.is_empty());
5135 assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
5136 let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5137 assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5138 assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5139 check_added_monitors!(nodes[1], 1);
5141 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5142 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5144 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5146 expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
5148 // Resolve the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5149 mine_transaction(&nodes[1], &htlc_success_txn[1]);
5150 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
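// The 196 msat fee earned here matches the forwarding_fee_base_msat configured for these nodes above.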
5151 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5152 assert!(updates.update_add_htlcs.is_empty());
5153 assert!(updates.update_fail_htlcs.is_empty());
5154 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5155 assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5156 assert!(updates.update_fail_malformed_htlcs.is_empty());
5157 check_added_monitors!(nodes[1], 1);
5159 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5160 commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5161 expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
5165 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5166 let chanmon_cfgs = create_chanmon_cfgs(2);
5167 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5168 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5169 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5171 // Create some initial channels
5172 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5174 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
5175 let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5176 assert_eq!(local_txn.len(), 1);
5177 assert_eq!(local_txn[0].input.len(), 1);
5178 check_spends!(local_txn[0], chan_1.3);
5180 // Give B knowledge of the preimage so it can generate a local HTLC-Success tx
5181 nodes[1].node.claim_funds(payment_preimage);
5182 expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
5183 check_added_monitors!(nodes[1], 1);
5185 mine_transaction(&nodes[1], &local_txn[0]);
5186 check_added_monitors!(nodes[1], 1);
5187 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5188 let events = nodes[1].node.get_and_clear_pending_msg_events();
5190 MessageSendEvent::UpdateHTLCs { .. } => {},
5191 _ => panic!("Unexpected event"),
5194 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5195 _ => panic!("Unexpected event"),
5198 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5199 assert_eq!(node_txn.len(), 1);
5200 assert_eq!(node_txn[0].input.len(), 1);
5201 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5202 check_spends!(node_txn[0], local_txn[0]);
5206 mine_transaction(&nodes[1], &node_tx);
5207 connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
5209 // Verify that B can spend its own HTLC-Success tx thanks to the spendable output event its ChannelMonitor gives back
5210 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5211 assert_eq!(spend_txn.len(), 1);
5212 assert_eq!(spend_txn[0].input.len(), 1);
5213 check_spends!(spend_txn[0], node_tx);
5214 assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5217 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5218 // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5219 // unrevoked commitment transaction.
5220 // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5221 // a remote RAA before they could be failed backwards (and combinations thereof).
5222 // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5223 // use the same payment hashes.
5224 // Thus, we use a six-node network:
5229 // And test where C fails back to A/B when D announces its latest commitment transaction
5230 let chanmon_cfgs = create_chanmon_cfgs(6);
5231 let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5232 // When this test was written, the default base fee floated based on the HTLC count.
5233 // It is now fixed, so we simply set the fee to the expected value here.
5234 let mut config = test_default_channel_config();
5235 config.channel_config.forwarding_fee_base_msat = 196;
5236 let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5237 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5238 let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5240 let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5241 let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5242 let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5243 let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5244 let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5);
5246 // Rebalance and check output sanity...
5247 send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5248 send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5249 assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5251 let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5252 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
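// HTLCs worth less than the dust limit plus the HTLC-transaction fee get no output in D's
// commitment transaction (they are "trimmed"), so they can only ever be failed back, not
// claimed on-chain.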
5254 let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5256 let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5257 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5259 send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5261 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5263 let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5265 let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5266 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5268 send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5270 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5273 let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5275 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5276 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5279 let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5281 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5282 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5284 // Double-check that six of the new HTLCs were added
5285 // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5286 // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
5287 assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5288 assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5290 // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5291 // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5292 nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5293 nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5294 nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5295 nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5296 check_added_monitors!(nodes[4], 0);
5298 let failed_destinations = vec![
5299 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5300 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5301 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5302 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5304 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5305 check_added_monitors!(nodes[4], 1);
5307 let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5308 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5309 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5310 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5311 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5312 commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5314 // Fail 3rd below-dust and 7th above-dust HTLCs
5315 nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5316 nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5317 check_added_monitors!(nodes[5], 0);
5319 let failed_destinations_2 = vec![
5320 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5321 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5323 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5324 check_added_monitors!(nodes[5], 1);
5326 let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5327 nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5328 nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5329 commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5331 let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5333 // After the 4 and 2 fail-backs above from nodes[4] and nodes[5] respectively, nodes[3] should receive 6 HTLCHandlingFailed events
5334 let failed_destinations_3 = vec![
5335 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5336 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5337 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5338 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5339 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5340 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5342 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5343 check_added_monitors!(nodes[3], 1);
5344 let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5345 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5346 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5347 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5348 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5349 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5350 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5351 if deliver_last_raa {
5352 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5354 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5357 // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5358 // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5359 // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5360 // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5362 // We now broadcast the latest commitment transaction, which *should* result in failures for
5363 // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5364 // the non-broadcast above-dust HTLCs.
5366 // Alternatively, we may broadcast the previous commitment transaction, which should only
5367 // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5368 let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5370 if announce_latest {
5371 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5373 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5375 let events = nodes[2].node.get_and_clear_pending_events();
5376 let close_event = if deliver_last_raa {
5377 assert_eq!(events.len(), 2 + 6);
5378 events.last().clone().unwrap()
5380 assert_eq!(events.len(), 1);
5381 events.last().clone().unwrap()
5384 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5385 _ => panic!("Unexpected event"),
5388 connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5389 check_closed_broadcast!(nodes[2], true);
5390 if deliver_last_raa {
5391 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
5393 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5394 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5396 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5397 repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5399 repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5402 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5404 check_added_monitors!(nodes[2], 3);
5406 let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5407 assert_eq!(cs_msgs.len(), 2);
5408 let mut a_done = false;
5409 for msg in cs_msgs {
5411 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5412 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5413 // should be failed-backwards here.
5414 let target = if *node_id == nodes[0].node.get_our_node_id() {
5415 // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5416 for htlc in &updates.update_fail_htlcs {
5417 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5419 assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5424 // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5425 for htlc in &updates.update_fail_htlcs {
5426 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5428 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5429 assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5432 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5433 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5434 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5435 if announce_latest {
5436 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5437 if *node_id == nodes[0].node.get_our_node_id() {
5438 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5441 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5443 _ => panic!("Unexpected event"),
5447 let as_events = nodes[0].node.get_and_clear_pending_events();
5448 assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5449 let mut as_faileds = new_hash_set();
5450 let mut as_updates = 0;
5451 for event in as_events.iter() {
5452 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5453 assert!(as_faileds.insert(*payment_hash));
5454 if *payment_hash != payment_hash_2 {
5455 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5457 assert!(!payment_failed_permanently);
5459 if let PathFailure::OnPath { network_update: Some(_) } = failure {
5462 } else if let &Event::PaymentFailed { .. } = event {
5463 } else { panic!("Unexpected event"); }
5465 assert!(as_faileds.contains(&payment_hash_1));
5466 assert!(as_faileds.contains(&payment_hash_2));
5467 if announce_latest {
5468 assert!(as_faileds.contains(&payment_hash_3));
5469 assert!(as_faileds.contains(&payment_hash_5));
5470 }
5471 assert!(as_faileds.contains(&payment_hash_6));
5473 let bs_events = nodes[1].node.get_and_clear_pending_events();
5474 assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5475 let mut bs_faileds = new_hash_set();
5476 let mut bs_updates = 0;
5477 for event in bs_events.iter() {
5478 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5479 assert!(bs_faileds.insert(*payment_hash));
5480 if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5481 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5482 } else {
5483 assert!(!payment_failed_permanently);
5484 }
5485 if let PathFailure::OnPath { network_update: Some(_) } = failure {
5486 bs_updates += 1;
5487 }
5488 } else if let &Event::PaymentFailed { .. } = event {
5489 } else { panic!("Unexpected event"); }
5490 }
5491 assert!(bs_faileds.contains(&payment_hash_1));
5492 assert!(bs_faileds.contains(&payment_hash_2));
5493 if announce_latest {
5494 assert!(bs_faileds.contains(&payment_hash_4));
5495 }
5496 assert!(bs_faileds.contains(&payment_hash_5));
5498 // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
5499 // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
5500 // unknown-preimage-etc, B should have gotten 2. Thus, in the
5501 // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
5502 assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5503 assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5504 }
5506 #[test]
5507 fn test_fail_backwards_latest_remote_announce_a() {
5508 do_test_fail_backwards_unrevoked_remote_announce(false, true);
5509 }
5511 #[test]
5512 fn test_fail_backwards_latest_remote_announce_b() {
5513 do_test_fail_backwards_unrevoked_remote_announce(true, true);
5514 }
5516 #[test]
5517 fn test_fail_backwards_previous_remote_announce() {
5518 do_test_fail_backwards_unrevoked_remote_announce(false, false);
5519 // Note that true, false doesn't make sense as it implies we announce a revoked state, which is
5520 // tested for in test_commitment_revoked_fail_backward_exhaustive()
5521 }
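// For reference, the three tests above exercise (deliver_last_raa, announce_latest) =
// (false, true), (true, true) and (false, false); the remaining combination, (true, false), is the
// revoked-broadcast case noted above and is exercised by test_commitment_revoked_fail_backward_exhaustive.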
5524 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5525 let chanmon_cfgs = create_chanmon_cfgs(2);
5526 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5527 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5528 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5530 // Create some initial channels
5531 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5533 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5534 let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5535 assert_eq!(local_txn[0].input.len(), 1);
5536 check_spends!(local_txn[0], chan_1.3);
5538 // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
5539 mine_transaction(&nodes[0], &local_txn[0]);
5540 check_closed_broadcast!(nodes[0], true);
5541 check_added_monitors!(nodes[0], 1);
5542 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5543 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5545 let htlc_timeout = {
5546 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5547 assert_eq!(node_txn.len(), 1);
5548 assert_eq!(node_txn[0].input.len(), 1);
5549 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5550 check_spends!(node_txn[0], local_txn[0]);
5551 node_txn[0].clone()
5552 };
5554 mine_transaction(&nodes[0], &htlc_timeout);
5555 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5556 expect_payment_failed!(nodes[0], our_payment_hash, false);
5558 // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
5559 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5560 assert_eq!(spend_txn.len(), 3);
5561 check_spends!(spend_txn[0], local_txn[0]);
5562 assert_eq!(spend_txn[1].input.len(), 1);
5563 check_spends!(spend_txn[1], htlc_timeout);
5564 assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5565 assert_eq!(spend_txn[2].input.len(), 2);
5566 check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5567 assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5568 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
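// Reading of the assertions above: spend_txn[0] claims the delayed to_self output on the broadcast
// commitment tx, spend_txn[1] claims the HTLC-Timeout output (hence the CSV sequence equal to
// BREAKDOWN_TIMEOUT, the to_self_delay used in these tests), and spend_txn[2] is an aggregated
// spend of both outputs.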
5572 fn test_key_derivation_params() {
5573 // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5574 // manager rotation to test that `channel_keys_id` returned in
5575 // [`SpendableOutputDescriptor::DelayedPaymentOutput`] let us re-derive the channel key set to
5576 // then derive a `delayed_payment_key`.
5578 let chanmon_cfgs = create_chanmon_cfgs(3);
5580 // We manually create the node configuration to backup the seed.
5581 let seed = [42; 32];
5582 let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5583 let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5584 let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5585 let scorer = RwLock::new(test_utils::TestScorer::new());
5586 let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
5587 let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
5588 let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5589 let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5590 node_cfgs.remove(0);
5591 node_cfgs.insert(0, node);
5593 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5594 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5596 // Create some initial channels
5597 // Create a dummy channel to advance index by one and thus test re-derivation correctness
5599 let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5600 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5601 assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5603 // Ensure all nodes are at the same height
5604 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5605 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5606 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5607 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5609 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5610 let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5611 let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5612 assert_eq!(local_txn_1[0].input.len(), 1);
5613 check_spends!(local_txn_1[0], chan_1.3);
5615 // We check funding pubkey are unique
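// witness[3] of a funding spend is the 2-of-2 redeemscript (OP_2 <pubkey1> <pubkey2> OP_2
// OP_CHECKMULTISIG), so bytes [2..35] and [36..69] below slice out the two 33-byte compressed
// funding pubkeys, skipping the leading OP_2 and each push-length byte.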
5616 let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5617 let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5618 if from_0_funding_key_0 == from_1_funding_key_0
5619 || from_0_funding_key_0 == from_1_funding_key_1
5620 || from_0_funding_key_1 == from_1_funding_key_0
5621 || from_0_funding_key_1 == from_1_funding_key_1 {
5622 panic!("Funding pubkeys aren't unique");
5625 // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
5626 mine_transaction(&nodes[0], &local_txn_1[0]);
5627 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5628 check_closed_broadcast!(nodes[0], true);
5629 check_added_monitors!(nodes[0], 1);
5630 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5632 let htlc_timeout = {
5633 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5634 assert_eq!(node_txn.len(), 1);
5635 assert_eq!(node_txn[0].input.len(), 1);
5636 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5637 check_spends!(node_txn[0], local_txn_1[0]);
5638 node_txn[0].clone()
5639 };
5641 mine_transaction(&nodes[0], &htlc_timeout);
5642 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5643 expect_payment_failed!(nodes[0], our_payment_hash, false);
5645 // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
5646 let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5647 let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5648 assert_eq!(spend_txn.len(), 3);
5649 check_spends!(spend_txn[0], local_txn_1[0]);
5650 assert_eq!(spend_txn[1].input.len(), 1);
5651 check_spends!(spend_txn[1], htlc_timeout);
5652 assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5653 assert_eq!(spend_txn[2].input.len(), 2);
5654 check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5655 assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5656 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5660 fn test_static_output_closing_tx() {
5661 let chanmon_cfgs = create_chanmon_cfgs(2);
5662 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5663 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5664 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5666 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5668 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5669 let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5671 mine_transaction(&nodes[0], &closing_tx);
5672 check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
5673 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5675 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5676 assert_eq!(spend_txn.len(), 1);
5677 check_spends!(spend_txn[0], closing_tx);
5679 mine_transaction(&nodes[1], &closing_tx);
5680 check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
5681 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5683 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5684 assert_eq!(spend_txn.len(), 1);
5685 check_spends!(spend_txn[0], closing_tx);
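// Note: SpendableOutputs events are only surfaced once the output is ANTI_REORG_DELAY
// confirmations deep, which is why each node connects ANTI_REORG_DELAY - 1 additional blocks
// after mining the closing transaction before calling check_spendable_outputs!().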
5688 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5689 let chanmon_cfgs = create_chanmon_cfgs(2);
5690 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5691 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5692 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5693 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5695 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5697 // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5698 // present in B's local commitment transaction, but none of A's commitment transactions.
5699 nodes[1].node.claim_funds(payment_preimage);
5700 check_added_monitors!(nodes[1], 1);
5701 expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5703 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5704 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5705 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
5707 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5708 check_added_monitors!(nodes[0], 1);
5709 let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5710 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5711 check_added_monitors!(nodes[1], 1);
5713 let starting_block = nodes[1].best_block_info();
5714 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5715 for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5716 connect_block(&nodes[1], &block);
5717 block.header.prev_blockhash = block.block_hash();
5718 }
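// Once the HTLC is within CLTV_CLAIM_BUFFER blocks of expiry, B's ChannelMonitor should go on
// chain to claim it: for a non-dust HTLC we expect an HTLC-Success transaction alongside the
// commitment broadcast, for a dust HTLC only the commitment itself, as checked below.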
5719 test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5720 check_closed_broadcast!(nodes[1], true);
5721 check_added_monitors!(nodes[1], 1);
5722 check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
5725 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5726 let chanmon_cfgs = create_chanmon_cfgs(2);
5727 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5728 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5729 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5730 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5732 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5733 nodes[0].node.send_payment_with_route(&route, payment_hash,
5734 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5735 check_added_monitors!(nodes[0], 1);
5737 let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5739 // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5740 // transaction, however it is not in A's latest local commitment, so we can just broadcast that
5741 // to "time out" the HTLC.
5743 let starting_block = nodes[1].best_block_info();
5744 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5746 for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5747 connect_block(&nodes[0], &block);
5748 block.header.prev_blockhash = block.block_hash();
5749 }
5750 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5751 check_closed_broadcast!(nodes[0], true);
5752 check_added_monitors!(nodes[0], 1);
5753 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5756 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5757 let chanmon_cfgs = create_chanmon_cfgs(3);
5758 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5759 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5760 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5761 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5763 // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5764 // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
5765 // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5766 // actually revoked.
5767 let htlc_value = if use_dust { 50000 } else { 3000000 };
5768 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5769 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5770 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5771 check_added_monitors!(nodes[1], 1);
5773 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5774 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5775 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5776 check_added_monitors!(nodes[0], 1);
5777 let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5778 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5779 check_added_monitors!(nodes[1], 1);
5780 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5781 check_added_monitors!(nodes[1], 1);
5782 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5784 if check_revoke_no_close {
5785 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5786 check_added_monitors!(nodes[0], 1);
5789 let starting_block = nodes[1].best_block_info();
5790 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5791 for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5792 connect_block(&nodes[0], &block);
5793 block.header.prev_blockhash = block.block_hash();
5794 }
5795 if !check_revoke_no_close {
5796 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5797 check_closed_broadcast!(nodes[0], true);
5798 check_added_monitors!(nodes[0], 1);
5799 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5801 expect_payment_failed!(nodes[0], our_payment_hash, true);
5805 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5806 // There are only a few cases to test here:
5807 // * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5808 // broadcastable commitment transactions result in channel closure,
5809 // * it's included in an unrevoked-but-previous remote commitment transaction,
5810 // * it's included in the latest remote or local commitment transactions.
5811 // We test each of the three possible commitment transactions individually and use both dust and
5812 // non-dust HTLCs.
5813 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5814 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5815 // tested for at least one of the cases in other tests.
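// The helpers above map onto those cases: do_htlc_claim_local_commitment_only (latest local
// commitment), do_htlc_claim_current_remote_commitment_only (latest remote commitment) and
// do_htlc_claim_previous_remote_commitment_only (unrevoked previous remote commitment), each run
// with both a dust and a non-dust HTLC by the two tests below.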
5817 fn htlc_claim_single_commitment_only_a() {
5818 do_htlc_claim_local_commitment_only(true);
5819 do_htlc_claim_local_commitment_only(false);
5821 do_htlc_claim_current_remote_commitment_only(true);
5822 do_htlc_claim_current_remote_commitment_only(false);
5826 fn htlc_claim_single_commitment_only_b() {
5827 do_htlc_claim_previous_remote_commitment_only(true, false);
5828 do_htlc_claim_previous_remote_commitment_only(false, false);
5829 do_htlc_claim_previous_remote_commitment_only(true, true);
5830 do_htlc_claim_previous_remote_commitment_only(false, true);
5835 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
5836 let chanmon_cfgs = create_chanmon_cfgs(2);
5837 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5838 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5839 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5840 // Force duplicate randomness for every get-random call
5841 for node in nodes.iter() {
5842 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5845 // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5846 let channel_value_satoshis=10000;
5847 let push_msat=10001;
5848 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5849 let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5850 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5851 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5853 // Create a second channel with the same random values. This used to panic due to a colliding
5854 // channel_id, but now panics due to a colliding outbound SCID alias.
5855 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5859 fn bolt2_open_channel_sending_node_checks_part2() {
5860 let chanmon_cfgs = create_chanmon_cfgs(2);
5861 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5862 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5863 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5865 // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
5866 let channel_value_satoshis=2^24;
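// Note: `^` is XOR in Rust, so `2^24` above is 26 sats rather than 16_777_216. The request is
// still rejected (26 sats is far below any workable funding amount), so the is_err() assertion
// below holds, just not for the 2^24 funding cap the BOLT quote describes.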
5867 let push_msat=10001;
5868 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5870 // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5871 let channel_value_satoshis=10000;
5872 // Test when push_msat is one greater than 1000 * funding_satoshis (i.e. just over the limit).
5873 let push_msat=1000*channel_value_satoshis+1;
5874 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5876 // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5877 let channel_value_satoshis=10000;
5878 let push_msat=10001;
5879 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
5880 let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5881 assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
5883 // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5884 // Only the least-significant bit of channel_flags is currently defined resulting in channel_flags only having one of two possible states 0 or 1
5885 assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
5887 // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5888 assert!(BREAKDOWN_TIMEOUT>0);
5889 assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
5891 // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5892 let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
5893 assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
5895 // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5896 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
5897 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
5898 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
5899 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
5900 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
5904 fn bolt2_open_channel_sane_dust_limit() {
5905 let chanmon_cfgs = create_chanmon_cfgs(2);
5906 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5907 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5908 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5910 let channel_value_satoshis=1000000;
5911 let push_msat=10001;
5912 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5913 let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5914 node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
5915 node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5917 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5918 let events = nodes[1].node.get_and_clear_pending_msg_events();
5919 let err_msg = match events[0] {
5920 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5921 msg.clone()
5922 },
5923 _ => panic!("Unexpected event"),
5924 };
5925 assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
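// 546 sat matches Bitcoin Core's dust threshold for P2PKH outputs; LDK caps acceptable
// counterparty dust limits there, so an open_channel advertising 547 is rejected as asserted above.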
5928 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5929 // originated from our node, its failure is surfaced to the user. We trigger this failure to
5930 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
5931 // is no longer affordable once it's freed.
5933 fn test_fail_holding_cell_htlc_upon_free() {
5934 let chanmon_cfgs = create_chanmon_cfgs(2);
5935 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5936 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5937 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5938 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5940 // First nodes[0] generates an update_fee, setting the channel's
5941 // pending_update_fee.
5942 {
5943 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5944 *feerate_lock += 20;
5945 }
5946 nodes[0].node.timer_tick_occurred();
5947 check_added_monitors!(nodes[0], 1);
5949 let events = nodes[0].node.get_and_clear_pending_msg_events();
5950 assert_eq!(events.len(), 1);
5951 let (update_msg, commitment_signed) = match events[0] {
5952 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5953 (update_fee.as_ref(), commitment_signed)
5954 },
5955 _ => panic!("Unexpected event"),
5956 };
5958 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5960 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5961 let channel_reserve = chan_stat.channel_reserve_msat;
5962 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5963 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5965 // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
5966 let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
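// Rough sketch of the arithmetic above (the exact weights live in ln::channel):
//   commit_tx_fee_msat(feerate, n, features)
//     ~ feerate_per_kw * (commitment_tx_base_weight(features) + n * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, expressed in msat
// so budgeting 2 * commit_tx_fee_msat(feerate, 1 + 1, ..) leaves room for this HTLC plus one more
// at roughly double the fee -- the "fee spike buffer" referenced in the comment above.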
5967 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5969 // Send a payment which passes reserve checks but gets stuck in the holding cell.
5970 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5971 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5972 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5973 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5975 // Flush the pending fee update.
5976 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5977 let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5978 check_added_monitors!(nodes[1], 1);
5979 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5980 check_added_monitors!(nodes[0], 1);
5982 // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5983 // HTLC, but now that the fee has been raised the payment will now fail, causing
5984 // us to surface its failure to the user.
5985 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5986 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5987 nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
5989 // Check that the payment failed to be sent out.
5990 let events = nodes[0].node.get_and_clear_pending_events();
5991 assert_eq!(events.len(), 2);
5992 match &events[0] {
5993 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5994 assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5995 assert_eq!(our_payment_hash.clone(), *payment_hash);
5996 assert_eq!(*payment_failed_permanently, false);
5997 assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
5998 },
5999 _ => panic!("Unexpected event"),
6000 }
6001 match &events[1] {
6002 &Event::PaymentFailed { ref payment_hash, .. } => {
6003 assert_eq!(our_payment_hash.clone(), *payment_hash);
6004 },
6005 _ => panic!("Unexpected event"),
6006 }
6007 }
6009 // Test that if multiple HTLCs are released from the holding cell and one is
6010 // valid but the other is no longer valid upon release, the valid HTLC can be
6011 // successfully completed while the other one fails as expected.
6013 fn test_free_and_fail_holding_cell_htlcs() {
6014 let chanmon_cfgs = create_chanmon_cfgs(2);
6015 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6016 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6017 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6018 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6020 // First nodes[0] generates an update_fee, setting the channel's
6021 // pending_update_fee.
6022 {
6023 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
6024 *feerate_lock += 200;
6025 }
6026 nodes[0].node.timer_tick_occurred();
6027 check_added_monitors!(nodes[0], 1);
6029 let events = nodes[0].node.get_and_clear_pending_msg_events();
6030 assert_eq!(events.len(), 1);
6031 let (update_msg, commitment_signed) = match events[0] {
6032 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6033 (update_fee.as_ref(), commitment_signed)
6034 },
6035 _ => panic!("Unexpected event"),
6036 };
6038 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6040 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6041 let channel_reserve = chan_stat.channel_reserve_msat;
6042 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6043 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6045 // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
6046 let amt_1 = 20000;
6047 let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
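// amt_1 is intentionally small; amt_2 is sized so that both HTLCs pass the reserve/fee checks at
// the old feerate when they enter the holding cell, while the update_fee above makes amt_2 (but
// not amt_1) unaffordable by the time the cell is freed -- the situation this test exercises.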
6048 let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
6049 let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
6051 // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6052 nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
6053 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
6054 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6055 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6056 let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
6057 nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
6058 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
6059 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6060 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6062 // Flush the pending fee update.
6063 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6064 let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6065 check_added_monitors!(nodes[1], 1);
6066 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6067 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6068 check_added_monitors!(nodes[0], 2);
6070 // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6071 // but now that the fee has been raised the second payment will now fail, causing us
6072 // to surface its failure to the user. The first payment should succeed.
6073 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6074 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6075 nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
6077 // Check that the second payment failed to be sent out.
6078 let events = nodes[0].node.get_and_clear_pending_events();
6079 assert_eq!(events.len(), 2);
6080 match &events[0] {
6081 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
6082 assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
6083 assert_eq!(payment_hash_2.clone(), *payment_hash);
6084 assert_eq!(*payment_failed_permanently, false);
6085 assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
6086 },
6087 _ => panic!("Unexpected event"),
6088 }
6089 match &events[1] {
6090 &Event::PaymentFailed { ref payment_hash, .. } => {
6091 assert_eq!(payment_hash_2.clone(), *payment_hash);
6092 },
6093 _ => panic!("Unexpected event"),
6094 }
6096 // Complete the first payment and the RAA from the fee update.
6097 let (payment_event, send_raa_event) = {
6098 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6099 assert_eq!(msgs.len(), 2);
6100 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6101 };
6102 let raa = match send_raa_event {
6103 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6104 _ => panic!("Unexpected event"),
6106 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6107 check_added_monitors!(nodes[1], 1);
6108 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6109 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6110 let events = nodes[1].node.get_and_clear_pending_events();
6111 assert_eq!(events.len(), 1);
6112 match events[0] {
6113 Event::PendingHTLCsForwardable { .. } => {},
6114 _ => panic!("Unexpected event"),
6115 }
6116 nodes[1].node.process_pending_htlc_forwards();
6117 let events = nodes[1].node.get_and_clear_pending_events();
6118 assert_eq!(events.len(), 1);
6119 match events[0] {
6120 Event::PaymentClaimable { .. } => {},
6121 _ => panic!("Unexpected event"),
6122 }
6123 nodes[1].node.claim_funds(payment_preimage_1);
6124 check_added_monitors!(nodes[1], 1);
6125 expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
6127 let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6128 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6129 commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6130 expect_payment_sent!(nodes[0], payment_preimage_1);
6133 // Test that if we fail to forward an HTLC that is being freed from the holding cell that the
6134 // HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
6135 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
6138 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6139 let chanmon_cfgs = create_chanmon_cfgs(3);
6140 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6141 // Avoid having to include routing fees in calculations
6142 let mut config = test_default_channel_config();
6143 config.channel_config.forwarding_fee_base_msat = 0;
6144 config.channel_config.forwarding_fee_proportional_millionths = 0;
6145 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6146 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6147 let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6148 let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6150 // First nodes[1] generates an update_fee, setting the channel's
6151 // pending_update_fee.
6152 {
6153 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6154 *feerate_lock += 20;
6155 }
6156 nodes[1].node.timer_tick_occurred();
6157 check_added_monitors!(nodes[1], 1);
6159 let events = nodes[1].node.get_and_clear_pending_msg_events();
6160 assert_eq!(events.len(), 1);
6161 let (update_msg, commitment_signed) = match events[0] {
6162 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6163 (update_fee.as_ref(), commitment_signed)
6164 },
6165 _ => panic!("Unexpected event"),
6166 };
6168 nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6170 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6171 let channel_reserve = chan_stat.channel_reserve_msat;
6172 let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6173 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6175 // Send a payment which passes reserve checks but gets stuck in the holding cell.
6176 let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6177 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6178 let payment_event = {
6179 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6180 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6181 check_added_monitors!(nodes[0], 1);
6183 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6184 assert_eq!(events.len(), 1);
6186 SendEvent::from_event(events.remove(0))
6187 };
6188 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6189 check_added_monitors!(nodes[1], 0);
6190 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6191 expect_pending_htlcs_forwardable!(nodes[1]);
6193 chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6194 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6196 // Flush the pending fee update.
6197 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6198 let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6199 check_added_monitors!(nodes[2], 1);
6200 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6201 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6202 check_added_monitors!(nodes[1], 2);
6204 // A final RAA message is generated to finalize the fee update.
6205 let events = nodes[1].node.get_and_clear_pending_msg_events();
6206 assert_eq!(events.len(), 1);
6208 let raa_msg = match &events[0] {
6209 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6210 msg.clone()
6211 },
6212 _ => panic!("Unexpected event"),
6213 };
6215 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6216 check_added_monitors!(nodes[2], 1);
6217 assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6219 // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6220 let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6221 assert_eq!(process_htlc_forwards_event.len(), 2);
6222 match &process_htlc_forwards_event[1] {
6223 &Event::PendingHTLCsForwardable { .. } => {},
6224 _ => panic!("Unexpected event"),
6227 // In response, we call ChannelManager's process_pending_htlc_forwards
6228 nodes[1].node.process_pending_htlc_forwards();
6229 check_added_monitors!(nodes[1], 1);
6231 // This causes the HTLC to be failed backwards.
6232 let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6233 assert_eq!(fail_event.len(), 1);
6234 let (fail_msg, commitment_signed) = match &fail_event[0] {
6235 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6236 assert_eq!(updates.update_add_htlcs.len(), 0);
6237 assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6238 assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6239 assert_eq!(updates.update_fail_htlcs.len(), 1);
6240 (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6241 },
6242 _ => panic!("Unexpected event"),
6243 };
6245 // Pass the failure messages back to nodes[0].
6246 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6247 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6249 // Complete the HTLC failure+removal process.
6250 let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6251 check_added_monitors!(nodes[0], 1);
6252 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6253 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6254 check_added_monitors!(nodes[1], 2);
6255 let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6256 assert_eq!(final_raa_event.len(), 1);
6257 let raa = match &final_raa_event[0] {
6258 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6259 _ => panic!("Unexpected event"),
6261 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6262 expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6263 check_added_monitors!(nodes[0], 1);
6267 fn test_payment_route_reaching_same_channel_twice() {
6268 //A route should not go through the same channel twice
6269 //It is enforced when constructing a route.
6270 let chanmon_cfgs = create_chanmon_cfgs(2);
6271 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6272 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6273 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6274 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6276 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6277 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6278 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6280 // Extend the path by itself, essentially simulating route going through same channel twice
6281 let cloned_hops = route.paths[0].hops.clone();
6282 route.paths[0].hops.extend_from_slice(&cloned_hops);
6284 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6285 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6286 ), false, APIError::InvalidRoute { ref err },
6287 assert_eq!(err, &"Path went through the same channel twice"));
6290 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6291 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6292 //TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO.
6295 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6296 //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6297 let chanmon_cfgs = create_chanmon_cfgs(2);
6298 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6299 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6300 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6301 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6303 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6304 route.paths[0].hops[0].fee_msat = 100;
6306 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6307 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6308 ), true, APIError::ChannelUnavailable { .. }, {});
6309 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6313 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6314 //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6315 let chanmon_cfgs = create_chanmon_cfgs(2);
6316 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6317 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6318 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6319 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6321 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6322 route.paths[0].hops[0].fee_msat = 0;
6323 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6324 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6325 true, APIError::ChannelUnavailable { ref err },
6326 assert_eq!(err, "Cannot send 0-msat HTLC"));
6328 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6329 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6333 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6334 //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6335 let chanmon_cfgs = create_chanmon_cfgs(2);
6336 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6337 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6338 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6339 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6341 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6342 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6343 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6344 check_added_monitors!(nodes[0], 1);
6345 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6346 updates.update_add_htlcs[0].amount_msat = 0;
6348 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6349 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
6350 check_closed_broadcast!(nodes[1], true).unwrap();
6351 check_added_monitors!(nodes[1], 1);
6352 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6353 [nodes[0].node.get_our_node_id()], 100000);
6357 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6358 //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6359 //It is enforced when constructing a route.
6360 let chanmon_cfgs = create_chanmon_cfgs(2);
6361 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6362 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6363 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6364 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6366 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6367 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6368 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6369 route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6370 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6371 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6372 ), true, APIError::InvalidRoute { ref err },
6373 assert_eq!(err, &"Channel CLTV overflowed?"));
6377 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6378 //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6379 //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6380 //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6381 let chanmon_cfgs = create_chanmon_cfgs(2);
6382 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6383 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6384 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6385 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6386 let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6387 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
6389 // Fetch a route in advance as we will be unable to once we're unable to send.
6390 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6391 for i in 0..max_accepted_htlcs {
6392 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6393 let payment_event = {
6394 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6395 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6396 check_added_monitors!(nodes[0], 1);
6398 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6399 assert_eq!(events.len(), 1);
6400 if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6401 assert_eq!(htlcs[0].htlc_id, i);
6405 SendEvent::from_event(events.remove(0))
6407 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6408 check_added_monitors!(nodes[1], 0);
6409 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6411 expect_pending_htlcs_forwardable!(nodes[1]);
6412 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6414 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6415 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6416 ), true, APIError::ChannelUnavailable { .. }, {});
6418 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6422 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6423 //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6424 let chanmon_cfgs = create_chanmon_cfgs(2);
6425 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6426 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6427 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6428 let channel_value = 100000;
6429 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6430 let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
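// With the default test config this limit comes from the counterparty's
// max_inbound_htlc_value_in_flight_percent_of_channel handshake setting (10% by default), so
// max_in_flight is well below the channel's total capacity here.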
6432 send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6434 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6435 // Manually create a route over our max in flight (which our router normally automatically
6437 route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6438 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6439 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6440 ), true, APIError::ChannelUnavailable { .. }, {});
6441 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6443 send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6446 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6448 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6449 //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6450 let chanmon_cfgs = create_chanmon_cfgs(2);
6451 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6452 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6453 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6454 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6455 let htlc_minimum_msat: u64;
6457 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6458 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6459 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6460 htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
6463 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6464 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6465 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6466 check_added_monitors!(nodes[0], 1);
6467 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6468 updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6469 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6470 assert!(nodes[1].node.list_channels().is_empty());
6471 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6472 assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6473 check_added_monitors!(nodes[1], 1);
6474 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6478 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6479 //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6480 let chanmon_cfgs = create_chanmon_cfgs(2);
6481 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6482 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6483 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6484 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6486 let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6487 let channel_reserve = chan_stat.channel_reserve_msat;
6488 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6489 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6490 // The 2* and +1 are for the fee spike reserve.
6491 let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6493 let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6494 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6495 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6496 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6497 check_added_monitors!(nodes[0], 1);
6498 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6500 // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6501 // at this time channel-initiatee receivers are not required to enforce that senders
6502 // respect the fee_spike_reserve.
6503 updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
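// The inflated amount above is one msat more than A's balance above its reserve (ignoring the
// fee-spike buffer, which the receiver does not enforce), so B sees A dip below the required
// remote reserve and must fail the channel, as asserted below.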
6504 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6506 assert!(nodes[1].node.list_channels().is_empty());
6507 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6508 assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6509 check_added_monitors!(nodes[1], 1);
6510 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6514 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6515 //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6516 //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6517 let chanmon_cfgs = create_chanmon_cfgs(2);
6518 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6519 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6520 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6521 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6523 let send_amt = 3999999;
6524 let (mut route, our_payment_hash, _, our_payment_secret) =
6525 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6526 route.paths[0].hops[0].fee_msat = send_amt;
6527 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6528 let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
6529 let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6530 let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
6531 let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6532 &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None).unwrap();
6533 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
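// Building the onion by hand (session key -> per-hop onion keys -> payloads -> packet) lets
// this test craft raw UpdateAddHTLC messages and replay them with arbitrary htlc_ids,
// bypassing the send-side limits ChannelManager would normally enforce.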
6535 let mut msg = msgs::UpdateAddHTLC {
6539 payment_hash: our_payment_hash,
6540 cltv_expiry: htlc_cltv,
6541 onion_routing_packet: onion_packet.clone(),
6542 skimmed_fee_msat: None,
6543 blinding_point: None,
6547 msg.htlc_id = i as u64;
6548 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6550 msg.htlc_id = 50 as u64;
6551 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6553 assert!(nodes[1].node.list_channels().is_empty());
6554 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6555 assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6556 check_added_monitors!(nodes[1], 1);
6557 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6561 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6562 //BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6563 let chanmon_cfgs = create_chanmon_cfgs(2);
6564 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6565 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6566 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6567 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6569 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6570 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6571 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6572 check_added_monitors!(nodes[0], 1);
6573 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6574 updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
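// One msat over the max_htlc_value_in_flight_msat that B advertised for this channel; B must
// treat the incoming add as a protocol violation and force-close, as asserted below.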
6575 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6577 assert!(nodes[1].node.list_channels().is_empty());
6578 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6579 assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6580 check_added_monitors!(nodes[1], 1);
6581 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
6585 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6586 //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6587 let chanmon_cfgs = create_chanmon_cfgs(2);
6588 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6589 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6590 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6592 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6593 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6594 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6595 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6596 check_added_monitors!(nodes[0], 1);
6597 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6598 updates.update_add_htlcs[0].cltv_expiry = 500000000;
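// 500_000_000 is the consensus locktime threshold between block heights and UNIX timestamps,
// so a cltv_expiry at or above it can only mean the sender used seconds rather than a block
// height, which B rejects below.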
6599 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6601 assert!(nodes[1].node.list_channels().is_empty());
6602 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6603 assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
6604 check_added_monitors!(nodes[1], 1);
6605 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6609 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6610 //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6611 // We test this by first checking that repeated HTLCs pass commitment signature checks
6612 // after disconnect and that non-sequential htlc_ids result in a channel failure.
6613 let chanmon_cfgs = create_chanmon_cfgs(2);
6614 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6615 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6616 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6618 create_announced_chan_between_nodes(&nodes, 0, 1);
6619 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6620 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6621 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6622 check_added_monitors!(nodes[0], 1);
6623 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6624 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6626 //Disconnect and Reconnect
6627 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6628 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6629 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6630 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6632 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6633 assert_eq!(reestablish_1.len(), 1);
6634 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6635 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6637 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6638 assert_eq!(reestablish_2.len(), 1);
6639 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6640 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6641 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6642 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6645 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6646 assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6647 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6648 check_added_monitors!(nodes[1], 1);
6649 let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
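// By this point B has received a commitment_signed covering htlc_id 0, so the replay below is
// no longer a benign post-reconnect retransmission: the id no longer matches the next one B
// expects, and B must fail the channel (checked further down).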
6651 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6653 assert!(nodes[1].node.list_channels().is_empty());
6654 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6655 assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6656 check_added_monitors!(nodes[1], 1);
6657 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6661 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6662 //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6664 let chanmon_cfgs = create_chanmon_cfgs(2);
6665 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6666 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6667 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6668 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6669 let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6670 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6671 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6673 check_added_monitors!(nodes[0], 1);
6674 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6675 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6677 let update_msg = msgs::UpdateFulfillHTLC{
6680 payment_preimage: our_payment_preimage,
6683 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
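// A has sent update_add_htlc but the HTLC is not yet irrevocably committed (no
// commitment_signed/revoke_and_ack exchange has completed), so a fulfill referencing it is
// premature and A must fail the channel, as asserted below.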
6685 assert!(nodes[0].node.list_channels().is_empty());
6686 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6687 assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6688 check_added_monitors!(nodes[0], 1);
6689 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6693 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6694 //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6696 let chanmon_cfgs = create_chanmon_cfgs(2);
6697 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6698 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6699 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6700 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6702 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6703 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6704 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6705 check_added_monitors!(nodes[0], 1);
6706 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6707 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6709 let update_msg = msgs::UpdateFailHTLC{
6712 reason: msgs::OnionErrorPacket { data: Vec::new()},
6715 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6717 assert!(nodes[0].node.list_channels().is_empty());
6718 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6719 assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6720 check_added_monitors!(nodes[0], 1);
6721 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6725 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6726 //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6728 let chanmon_cfgs = create_chanmon_cfgs(2);
6729 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6730 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6731 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6732 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6734 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6735 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6736 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6737 check_added_monitors!(nodes[0], 1);
6738 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6739 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6740 let update_msg = msgs::UpdateFailMalformedHTLC{
6743 sha256_of_onion: [1; 32],
6744 failure_code: 0x8000,
6747 nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6749 assert!(nodes[0].node.list_channels().is_empty());
6750 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6751 assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6752 check_added_monitors!(nodes[0], 1);
6753 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6757 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6758 //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6760 let chanmon_cfgs = create_chanmon_cfgs(2);
6761 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6762 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6763 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6764 create_announced_chan_between_nodes(&nodes, 0, 1);
6766 let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6768 nodes[1].node.claim_funds(our_payment_preimage);
6769 check_added_monitors!(nodes[1], 1);
6770 expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6772 let events = nodes[1].node.get_and_clear_pending_msg_events();
6773 assert_eq!(events.len(), 1);
6774 let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6776 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6777 assert!(update_add_htlcs.is_empty());
6778 assert_eq!(update_fulfill_htlcs.len(), 1);
6779 assert!(update_fail_htlcs.is_empty());
6780 assert!(update_fail_malformed_htlcs.is_empty());
6781 assert!(update_fee.is_none());
6782 update_fulfill_htlcs[0].clone()
6784 _ => panic!("Unexpected event"),
6788 update_fulfill_msg.htlc_id = 1;
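// The only HTLC on this channel has id 0; pointing the fulfill at id 1 references an HTLC A
// cannot find, which per BOLT 2 must fail the channel (see the error check below).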
6790 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6792 assert!(nodes[0].node.list_channels().is_empty());
6793 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6794 assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6795 check_added_monitors!(nodes[0], 1);
6796 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6800 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6801 //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6803 let chanmon_cfgs = create_chanmon_cfgs(2);
6804 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6805 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6806 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6807 create_announced_chan_between_nodes(&nodes, 0, 1);
6809 let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6811 nodes[1].node.claim_funds(our_payment_preimage);
6812 check_added_monitors!(nodes[1], 1);
6813 expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6815 let events = nodes[1].node.get_and_clear_pending_msg_events();
6816 assert_eq!(events.len(), 1);
6817 let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6819 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6820 assert!(update_add_htlcs.is_empty());
6821 assert_eq!(update_fulfill_htlcs.len(), 1);
6822 assert!(update_fail_htlcs.is_empty());
6823 assert!(update_fail_malformed_htlcs.is_empty());
6824 assert!(update_fee.is_none());
6825 update_fulfill_htlcs[0].clone()
6827 _ => panic!("Unexpected event"),
6831 update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
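// [1; 32] does not SHA256-hash to the HTLC's payment_hash, so A must reject the fulfill and
// fail the channel, as the regex assertion below checks.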
6833 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6835 assert!(nodes[0].node.list_channels().is_empty());
6836 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6837 assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6838 check_added_monitors!(nodes[0], 1);
6839 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6843 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6844 //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6846 let chanmon_cfgs = create_chanmon_cfgs(2);
6847 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6848 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6849 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6850 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6852 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6853 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6854 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6855 check_added_monitors!(nodes[0], 1);
6857 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6858 updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6860 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6861 check_added_monitors!(nodes[1], 0);
6862 commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6864 let events = nodes[1].node.get_and_clear_pending_msg_events();
6866 let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6868 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6869 assert!(update_add_htlcs.is_empty());
6870 assert!(update_fulfill_htlcs.is_empty());
6871 assert!(update_fail_htlcs.is_empty());
6872 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6873 assert!(update_fee.is_none());
6874 update_fail_malformed_htlcs[0].clone()
6876 _ => panic!("Unexpected event"),
6879 update_msg.failure_code &= !0x8000;
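// 0x8000 is the BADONION bit; clearing it from an update_fail_malformed_htlc violates the
// BOLT 2 requirement this test covers, so A fails the channel below.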
6880 nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6882 assert!(nodes[0].node.list_channels().is_empty());
6883 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6884 assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6885 check_added_monitors!(nodes[0], 1);
6886 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
6890 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6891 //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6892 // * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6894 let chanmon_cfgs = create_chanmon_cfgs(3);
6895 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6896 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6897 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6898 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6899 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6901 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6904 let mut payment_event = {
6905 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6906 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6907 check_added_monitors!(nodes[0], 1);
6908 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6909 assert_eq!(events.len(), 1);
6910 SendEvent::from_event(events.remove(0))
6912 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6913 check_added_monitors!(nodes[1], 0);
6914 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6915 expect_pending_htlcs_forwardable!(nodes[1]);
6916 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6917 assert_eq!(events_2.len(), 1);
6918 check_added_monitors!(nodes[1], 1);
6919 payment_event = SendEvent::from_event(events_2.remove(0));
6920 assert_eq!(payment_event.msgs.len(), 1);
6923 payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
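// Onion packet version 1 is unknown to node C, so C treats the onion as malformed
// (invalid_onion_version) and must respond with update_fail_malformed_htlc rather than a
// normal update_fail_htlc.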
6924 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6925 check_added_monitors!(nodes[2], 0);
6926 commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6928 let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6929 assert_eq!(events_3.len(), 1);
6930 let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6932 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6933 assert!(update_add_htlcs.is_empty());
6934 assert!(update_fulfill_htlcs.is_empty());
6935 assert!(update_fail_htlcs.is_empty());
6936 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6937 assert!(update_fee.is_none());
6938 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6940 _ => panic!("Unexpected event"),
6944 nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6946 check_added_monitors!(nodes[1], 0);
6947 commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6948 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6949 let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6950 assert_eq!(events_4.len(), 1);
6952 //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
6954 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6955 assert!(update_add_htlcs.is_empty());
6956 assert!(update_fulfill_htlcs.is_empty());
6957 assert_eq!(update_fail_htlcs.len(), 1);
6958 assert!(update_fail_malformed_htlcs.is_empty());
6959 assert!(update_fee.is_none());
6961 _ => panic!("Unexpected event"),
6964 check_added_monitors!(nodes[1], 1);
6968 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6969 let chanmon_cfgs = create_chanmon_cfgs(3);
6970 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6971 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6972 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6973 create_announced_chan_between_nodes(&nodes, 0, 1);
6974 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6976 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6979 let mut payment_event = {
6980 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6981 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6982 check_added_monitors!(nodes[0], 1);
6983 SendEvent::from_node(&nodes[0])
6986 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6987 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6988 expect_pending_htlcs_forwardable!(nodes[1]);
6989 check_added_monitors!(nodes[1], 1);
6990 payment_event = SendEvent::from_node(&nodes[1]);
6991 assert_eq!(payment_event.msgs.len(), 1);
6994 payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
6995 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6996 check_added_monitors!(nodes[2], 0);
6997 commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6999 let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
7000 assert_eq!(events_3.len(), 1);
7002 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
7003 let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
7004 // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
7005 update_msg.failure_code |= 0x2000;
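// With NODE (0x2000) OR'd into BADONION|PERM|invalid_onion_version, the error propagated back
// to the origin should carry code 0x8000|0x4000|0x2000|4 and a permanent ChannelFailure
// network update for the B <-> C channel, as asserted further down.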
7007 nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
7008 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
7010 _ => panic!("Unexpected event"),
7013 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
7014 vec![HTLCDestination::NextHopChannel {
7015 node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
7016 let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
7017 assert_eq!(events_4.len(), 1);
7018 check_added_monitors!(nodes[1], 1);
7021 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
7022 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
7023 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
7025 _ => panic!("Unexpected event"),
7028 let events_5 = nodes[0].node.get_and_clear_pending_events();
7029 assert_eq!(events_5.len(), 2);
7031 // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
7032 // the node originating the error to its next hop.
7034 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
7036 assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
7037 assert!(is_permanent);
7038 assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
7040 _ => panic!("Unexpected event"),
7043 Event::PaymentFailed { payment_hash, .. } => {
7044 assert_eq!(payment_hash, our_payment_hash);
7046 _ => panic!("Unexpected event"),
7049 // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
7052 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
7053 // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment) reaches ANTI_REORG_DELAY
7054 // We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to get them all, as
7055 // an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA
7057 let mut chanmon_cfgs = create_chanmon_cfgs(2);
7058 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7059 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7060 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7061 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7062 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7064 let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7065 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7067 // We route 2 dust-HTLCs between A and B
7068 let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7069 let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7070 route_payment(&nodes[0], &[&nodes[1]], 1000000);
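// Two HTLCs below B's dust limit (so they produce no output on the commitment tx and can only
// be failed once the commitment itself confirms) plus one over-dust HTLC.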
7072 // Cache one local commitment tx as previous
7073 let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7075 // Fail one HTLC to prune it in the will-be-latest-local commitment tx
7076 nodes[1].node.fail_htlc_backwards(&payment_hash_2);
7077 check_added_monitors!(nodes[1], 0);
7078 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7079 check_added_monitors!(nodes[1], 1);
7081 let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7082 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
7083 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7084 check_added_monitors!(nodes[0], 1);
7086 // Cache one local commitment tx as latest
7087 let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7089 let events = nodes[0].node.get_and_clear_pending_msg_events();
7091 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7092 assert_eq!(node_id, nodes[1].node.get_our_node_id());
7094 _ => panic!("Unexpected event"),
7097 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7098 assert_eq!(node_id, nodes[1].node.get_our_node_id());
7100 _ => panic!("Unexpected event"),
7103 assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7104 // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
7105 if announce_latest {
7106 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7108 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7111 check_closed_broadcast!(nodes[0], true);
7112 check_added_monitors!(nodes[0], 1);
7113 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7115 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7116 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7117 let events = nodes[0].node.get_and_clear_pending_events();
7118 // Only 2 PaymentPathFailed events (plus their matching PaymentFailed events) should show up; the over-dust HTLC has to be failed by the timeout tx
7119 assert_eq!(events.len(), 4);
7120 let mut first_failed = false;
7121 for event in events {
7123 Event::PaymentPathFailed { payment_hash, .. } => {
7124 if payment_hash == payment_hash_1 {
7125 assert!(!first_failed);
7126 first_failed = true;
7128 assert_eq!(payment_hash, payment_hash_2);
7131 Event::PaymentFailed { .. } => {}
7132 _ => panic!("Unexpected event"),
7138 fn test_failure_delay_dust_htlc_local_commitment() {
7139 do_test_failure_delay_dust_htlc_local_commitment(true);
7140 do_test_failure_delay_dust_htlc_local_commitment(false);
7143 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7144 // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7145 // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7146 // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7147 // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7148 // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7149 // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
7151 let chanmon_cfgs = create_chanmon_cfgs(3);
7152 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7153 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7154 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7155 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7157 let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7158 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7160 let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7161 let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
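// One dust HTLC (below B's dust limit, no on-chain output) and one non-dust HTLC (which gets a
// commitment output and therefore an HTLC-timeout claim path).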
7163 let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7164 let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7166 // We revoked bs_commitment_tx
7168 let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7169 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7172 let mut timeout_tx = Vec::new();
7174 // We fail dust-HTLC 1 by broadcast of local commitment tx
7175 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7176 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7177 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7178 expect_payment_failed!(nodes[0], dust_hash, false);
7180 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7181 check_closed_broadcast!(nodes[0], true);
7182 check_added_monitors!(nodes[0], 1);
7183 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7184 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
7185 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7186 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7187 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7188 mine_transaction(&nodes[0], &timeout_tx[0]);
7189 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7190 expect_payment_failed!(nodes[0], non_dust_hash, false);
7192 // We fail dust-HTLC 1 by broadcast of the remote commitment tx. If revoked, also fail the non-dust HTLC
7193 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7194 check_closed_broadcast!(nodes[0], true);
7195 check_added_monitors!(nodes[0], 1);
7196 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7197 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7199 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7200 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
7201 .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
7202 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
7203 // For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the
7204 // dust HTLC should have been failed.
7205 expect_payment_failed!(nodes[0], dust_hash, false);
7208 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7210 assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
7212 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
7213 mine_transaction(&nodes[0], &timeout_tx[0]);
7214 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7215 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7216 expect_payment_failed!(nodes[0], non_dust_hash, false);
7221 fn test_sweep_outbound_htlc_failure_update() {
7222 do_test_sweep_outbound_htlc_failure_update(false, true);
7223 do_test_sweep_outbound_htlc_failure_update(false, false);
7224 do_test_sweep_outbound_htlc_failure_update(true, false);
7228 fn test_user_configurable_csv_delay() {
7229 // We test that our channel constructors yield errors when we pass them an absurd CSV delay
7231 let mut low_our_to_self_config = UserConfig::default();
7232 low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
7233 let mut high_their_to_self_config = UserConfig::default();
7234 high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
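// our_to_self_delay = 6 blocks is below BREAKDOWN_TIMEOUT, and their_to_self_delay = 100 is a
// deliberately low cap so that a counterparty asking for a 200-block delay gets rejected in
// the open/accept paths exercised below.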
7235 let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7236 let chanmon_cfgs = create_chanmon_cfgs(2);
7237 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7238 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7239 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7241 // We test that config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
7242 if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7243 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
7244 &low_our_to_self_config, 0, 42, None)
7247 APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7248 _ => panic!("Unexpected event"),
7250 } else { assert!(false) }
7252 // We test that config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7253 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7254 let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7255 open_channel.common_fields.to_self_delay = 200;
7256 if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7257 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7258 &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7261 ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7262 _ => panic!("Unexpected event"),
7264 } else { assert!(false); }
7266 // We test that msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7267 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7268 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7269 let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7270 accept_channel.common_fields.to_self_delay = 200;
7271 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7273 if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7275 &ErrorAction::SendErrorMessage { ref msg } => {
7276 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7277 reason_msg = msg.data.clone();
7281 } else { panic!(); }
7282 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
7284 // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7285 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7286 let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7287 open_channel.common_fields.to_self_delay = 200;
7288 if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7289 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7290 &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7293 ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7294 _ => panic!("Unexpected event"),
7296 } else { assert!(false); }
7300 fn test_check_htlc_underpaying() {
7301 // Send a payment through A -> B, but A maliciously
7302 // sends a probe payment (i.e. less than the expected value)
7303 // to B; B should refuse the payment.
7305 let chanmon_cfgs = create_chanmon_cfgs(2);
7306 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7307 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7308 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7310 // Create some initial channels
7311 create_announced_chan_between_nodes(&nodes, 0, 1);
7313 let scorer = test_utils::TestScorer::new();
7314 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7315 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
7316 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7317 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
7318 let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
7319 None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7320 let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7321 let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
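// B registers an expected amount of 100_000 msat for this payment hash while the route above
// only delivers 10_000 msat, so B fails it back with incorrect_or_unknown_payment_details
// (0x4000|15), carrying the received amount and current height, as checked at the end.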
7322 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7323 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7324 check_added_monitors!(nodes[0], 1);
7326 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7327 assert_eq!(events.len(), 1);
7328 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7329 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7330 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7332 // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7333 // and then will wait a second random delay before failing the HTLC back:
7334 expect_pending_htlcs_forwardable!(nodes[1]);
7335 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7337 // The recipient (node B) is expecting a payment of 100_000 msat but received 10_000,
7338 // so it should fail the HTLC as if we didn't know the preimage.
7339 nodes[1].node.process_pending_htlc_forwards();
7341 let events = nodes[1].node.get_and_clear_pending_msg_events();
7342 assert_eq!(events.len(), 1);
7343 let (update_fail_htlc, commitment_signed) = match events[0] {
7344 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7345 assert!(update_add_htlcs.is_empty());
7346 assert!(update_fulfill_htlcs.is_empty());
7347 assert_eq!(update_fail_htlcs.len(), 1);
7348 assert!(update_fail_malformed_htlcs.is_empty());
7349 assert!(update_fee.is_none());
7350 (update_fail_htlcs[0].clone(), commitment_signed)
7352 _ => panic!("Unexpected event"),
7354 check_added_monitors!(nodes[1], 1);
7356 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7357 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7359 // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7360 let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7361 expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
7362 expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7366 fn test_announce_disable_channels() {
7367 // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check that disabling
7368 // ChannelUpdates are generated. Reconnect B, reestablish, and check that no re-enabling ChannelUpdate is generated until enough ticks pass.
7370 let chanmon_cfgs = create_chanmon_cfgs(2);
7371 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7372 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7373 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7375 // Connect a dummy node so that future events are properly broadcast
7376 connect_dummy_node(&nodes[0]);
7378 create_announced_chan_between_nodes(&nodes, 0, 1);
7379 create_announced_chan_between_nodes(&nodes, 1, 0);
7380 create_announced_chan_between_nodes(&nodes, 0, 1);
7383 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7384 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7386 for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7387 nodes[0].node.timer_tick_occurred();
7389 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7390 assert_eq!(msg_events.len(), 3);
7391 let mut chans_disabled = new_hash_map();
7392 for e in msg_events {
7394 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7395 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7396 // Check that each channel gets updated exactly once
7397 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7398 panic!("Generated ChannelUpdate for wrong chan!");
7401 _ => panic!("Unexpected event"),
7405 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7406 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7408 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7409 assert_eq!(reestablish_1.len(), 3);
7410 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7411 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7413 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7414 assert_eq!(reestablish_2.len(), 3);
7416 // Reestablish chan_1
7417 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7418 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7419 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7420 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7421 // Reestablish chan_2
7422 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7423 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7424 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7425 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7426 // Reestablish chan_3
7427 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7428 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7429 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7430 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7432 for _ in 0..ENABLE_GOSSIP_TICKS {
7433 nodes[0].node.timer_tick_occurred();
7435 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
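// No re-enabling ChannelUpdates are broadcast until a full ENABLE_GOSSIP_TICKS ticks have
// elapsed with the peer connected; the very next tick should then emit one update per channel
// with the disabled bit cleared.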
7436 nodes[0].node.timer_tick_occurred();
7437 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7438 assert_eq!(msg_events.len(), 3);
7439 for e in msg_events {
7441 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7442 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7443 match chans_disabled.remove(&msg.contents.short_channel_id) {
7444 // Each update should have a higher timestamp than the previous one, replacing the previous entry
7446 Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7447 None => panic!("Generated ChannelUpdate for wrong chan!"),
7450 _ => panic!("Unexpected event"),
7453 // Check that each channel gets updated exactly once
7454 assert!(chans_disabled.is_empty());
7458 fn test_bump_penalty_txn_on_revoked_commitment() {
7459 // In case penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7460 // we're able to claim outputs on the revoked commitment transaction before the timelocks expire
7462 let chanmon_cfgs = create_chanmon_cfgs(2);
7463 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7464 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7465 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7467 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7469 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7470 let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7471 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7472 let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7473 send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7475 let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7476 // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7477 assert_eq!(revoked_txn[0].output.len(), 4);
7478 assert_eq!(revoked_txn[0].input.len(), 1);
7479 assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7480 let revoked_txid = revoked_txn[0].txid();
7482 let mut penalty_sum = 0;
7483 for outp in revoked_txn[0].output.iter() {
7484 if outp.script_pubkey.is_p2wsh() {
7485 penalty_sum += outp.value.to_sat();
7489 // Connect blocks to change the height_timer range to see if we use the right soonest_timelock
7490 let header_114 = connect_blocks(&nodes[1], 14);
7492 // Actually revoke the tx by claiming an HTLC
7493 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7494 connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7495 check_added_monitors!(nodes[1], 1);
7497 // One or more justice txs should have been broadcast; check them
7501 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7502 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7503 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7504 assert_eq!(node_txn[0].output.len(), 1);
7505 check_spends!(node_txn[0], revoked_txn[0]);
7506 let fee_1 = penalty_sum - node_txn[0].output[0].value.to_sat();
7507 feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
7508 penalty_1 = node_txn[0].txid();
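// feerate_1 above is in sats per kilo-weight-unit: the fee (claimed input value minus output
// value) scaled by 1000 and divided by the transaction weight. Each later bump is required to
// beat the previous feerate by at least 25% (checked below).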
7512 // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7513 connect_blocks(&nodes[1], 15);
7514 let mut penalty_2 = penalty_1;
7515 let mut feerate_2 = 0;
7517 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7518 assert_eq!(node_txn.len(), 1);
7519 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7520 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7521 assert_eq!(node_txn[0].output.len(), 1);
7522 check_spends!(node_txn[0], revoked_txn[0]);
7523 penalty_2 = node_txn[0].txid();
7524 // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
7525 assert_ne!(penalty_2, penalty_1);
7526 let fee_2 = penalty_sum - node_txn[0].output[0].value.to_sat();
7527 feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7528 // Verify 25% bump heuristic
7529 assert!(feerate_2 * 100 >= feerate_1 * 125);
7533 assert_ne!(feerate_2, 0);
7535 // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7536 connect_blocks(&nodes[1], 1);
7538 let mut feerate_3 = 0;
7540 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7541 assert_eq!(node_txn.len(), 1);
7542 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7543 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7544 assert_eq!(node_txn[0].output.len(), 1);
7545 check_spends!(node_txn[0], revoked_txn[0]);
7546 penalty_3 = node_txn[0].txid();
7548 // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
7548 assert_ne!(penalty_3, penalty_2);
7549 let fee_3 = penalty_sum - node_txn[0].output[0].value.to_sat();
7550 feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
7551 // Verify 25% bump heuristic
7552 assert!(feerate_3 * 100 >= feerate_2 * 125);
7556 assert_ne!(feerate_3, 0);
7558 nodes[1].node.get_and_clear_pending_events();
7559 nodes[1].node.get_and_clear_pending_msg_events();
7563 fn test_bump_penalty_txn_on_revoked_htlcs() {
7564 // In case penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7565 // we're able to claim outputs on revoked HTLC transactions before the timelocks expire
7567 let mut chanmon_cfgs = create_chanmon_cfgs(2);
7568 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7569 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7570 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7571 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7573 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7574 // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7575 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7576 let scorer = test_utils::TestScorer::new();
7577 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7578 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7579 let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
7580 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7581 let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7582 let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
7583 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7584 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7585 let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
7586 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7587 let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
7589 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7590 assert_eq!(revoked_local_txn[0].input.len(), 1);
7591 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7593 // Revoke local commitment tx
7594 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7596 // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7597 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7598 check_closed_broadcast!(nodes[1], true);
7599 check_added_monitors!(nodes[1], 1);
7600 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7601 connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7603 let revoked_htlc_txn = {
7604 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7605 assert_eq!(txn.len(), 2);
7607 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7608 assert_eq!(txn[0].input.len(), 1);
7609 check_spends!(txn[0], revoked_local_txn[0]);
7611 assert_eq!(txn[1].input.len(), 1);
7612 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7613 assert_eq!(txn[1].output.len(), 1);
7614 check_spends!(txn[1], revoked_local_txn[0]);
7619 // Broadcast set of revoked txn on A
7620 let hash_128 = connect_blocks(&nodes[0], 40);
7621 let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7622 connect_block(&nodes[0], &block_11);
7623 let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7624 connect_block(&nodes[0], &block_129);
7625 let events = nodes[0].node.get_and_clear_pending_events();
7626 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
7627 match events.last().unwrap() {
7628 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7629 _ => panic!("Unexpected event"),
7635 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7636 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
7637 // Verify claim txn are spending revoked HTLC txn
7639 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7640 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7641 // which are included in the same block (they are broadcast because we scan the
7642 // transactions linearly and generate claims as we go; they likely should be removed in the future).
7644 assert_eq!(node_txn[0].input.len(), 1);
7645 check_spends!(node_txn[0], revoked_local_txn[0]);
7646 assert_eq!(node_txn[1].input.len(), 1);
7647 check_spends!(node_txn[1], revoked_local_txn[0]);
7648 assert_eq!(node_txn[2].input.len(), 1);
7649 check_spends!(node_txn[2], revoked_local_txn[0]);
7651 // Each of the three justice transactions claims a separate (single) output of the three
7652 // available, which we check here:
7653 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7654 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7655 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7657 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7658 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7660 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7661 // output, checked above).
7662 assert_eq!(node_txn[3].input.len(), 2);
7663 assert_eq!(node_txn[3].output.len(), 1);
7664 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7666 first = node_txn[3].txid();
7667 // Store both feerates for later comparison
7668 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7669 feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
7670 penalty_txn = vec![node_txn[2].clone()];
7674 // Connect one more block to see if bumped penalty txn are issued for the HTLC txn
7675 let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7676 connect_block(&nodes[0], &block_130);
7677 let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7678 connect_block(&nodes[0], &block_131);
7680 // A few more blocks to confirm the penalty txn
7681 connect_blocks(&nodes[0], 4);
7682 assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7683 let header_144 = connect_blocks(&nodes[0], 9);
7685 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7686 assert_eq!(node_txn.len(), 1);
7688 assert_eq!(node_txn[0].input.len(), 2);
7689 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7690 // Verify the bumped tx is different and check the 25% bump heuristic
7691 assert_ne!(first, node_txn[0].txid());
7692 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7693 let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7694 assert!(feerate_2 * 100 > feerate_1 * 125);
7695 let txn = vec![node_txn[0].clone()];
7699 // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7700 connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7701 connect_blocks(&nodes[0], 20);
7703 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7704 // We verify that no new transaction has been broadcast. Previously we were buggy on this exact
7705 // behavior: we did not track remote HTLC outputs for monitoring (see #411), which meant we
7706 // would never see them spent by a justice tx, so bumped justice txn were generated forever
7707 // instead of being safely cleaned up after confirmation plus ANTI_REORG_DELAY blocks.
7708 // Enforce that spending the revoked HTLC outputs via a claiming transaction removes the request
7709 // as expected and dries up bumped justice generation.
7710 assert_eq!(node_txn.len(), 0);
7713 check_closed_broadcast!(nodes[0], true);
7714 check_added_monitors!(nodes[0], 1);
7718 fn test_bump_penalty_txn_on_remote_commitment() {
7719 // In case of claim txn with feerates too low to get into mempools, RBF-bump them to be sure
7720 // we're able to claim outputs on the remote commitment transaction before its timelocks expire
7723 // Provide preimage for one
7724 // Check aggregation
7726 let chanmon_cfgs = create_chanmon_cfgs(2);
7727 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7728 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7729 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7731 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7732 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7733 route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7735 // Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7736 let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7737 assert_eq!(remote_txn[0].output.len(), 4);
7738 assert_eq!(remote_txn[0].input.len(), 1);
7739 assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7741 // Claim a HTLC without revocation (provide B monitor with preimage)
7742 nodes[1].node.claim_funds(payment_preimage);
7743 expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7744 mine_transaction(&nodes[1], &remote_txn[0]);
7745 check_added_monitors!(nodes[1], 2);
7746 connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7748 // One or more claim tx should have been broadcast, check it
7752 let feerate_timeout;
7753 let feerate_preimage;
7755 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7756 // 3 transactions including:
7757 // preimage and timeout sweeps from remote commitment + preimage sweep bump
7758 assert_eq!(node_txn.len(), 3);
7759 assert_eq!(node_txn[0].input.len(), 1);
7760 assert_eq!(node_txn[1].input.len(), 1);
7761 assert_eq!(node_txn[2].input.len(), 1);
7762 check_spends!(node_txn[0], remote_txn[0]);
7763 check_spends!(node_txn[1], remote_txn[0]);
7764 check_spends!(node_txn[2], remote_txn[0]);
7766 preimage = node_txn[0].txid();
7767 let index = node_txn[0].input[0].previous_output.vout;
7768 let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
7769 feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();
7771 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7772 (node_txn[2].clone(), node_txn[1].clone())
7774 (node_txn[1].clone(), node_txn[2].clone())
7777 preimage_bump = preimage_bump_tx;
7778 check_spends!(preimage_bump, remote_txn[0]);
7779 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7781 timeout = timeout_tx.txid();
7782 let index = timeout_tx.input[0].previous_output.vout;
7783 let fee = remote_txn[0].output[index as usize].value.to_sat() - timeout_tx.output[0].value.to_sat();
7784 feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();
7788 assert_ne!(feerate_timeout, 0);
7789 assert_ne!(feerate_preimage, 0);
7791 // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7792 connect_blocks(&nodes[1], 1);
7794 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7795 assert_eq!(node_txn.len(), 1);
7796 assert_eq!(node_txn[0].input.len(), 1);
7797 assert_eq!(preimage_bump.input.len(), 1);
7798 check_spends!(node_txn[0], remote_txn[0]);
7799 check_spends!(preimage_bump, remote_txn[0]);
7801 let index = preimage_bump.input[0].previous_output.vout;
7802 let fee = remote_txn[0].output[index as usize].value.to_sat() - preimage_bump.output[0].value.to_sat();
7803 let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
7804 assert!(new_feerate * 100 > feerate_timeout * 125);
7805 assert_ne!(timeout, preimage_bump.txid());
7807 let index = node_txn[0].input[0].previous_output.vout;
7808 let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
7809 let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
7810 assert!(new_feerate * 100 > feerate_preimage * 125);
7811 assert_ne!(preimage, node_txn[0].txid());
7816 nodes[1].node.get_and_clear_pending_events();
7817 nodes[1].node.get_and_clear_pending_msg_events();
7821 fn test_counterparty_raa_skip_no_crash() {
7822 // Previously, if our counterparty sent two RAAs in a row without us having provided a
7823 // commitment transaction, we would have happily carried on and provided them the next
7824 // commitment transaction based on one RAA forward. This would probably eventually have led to
7825 // channel closure, but it would not have resulted in funds loss. Still, our
7826 // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
7827 // check simply that the channel is closed in response to such an RAA, but don't check whether
7828 // we decide to punish our counterparty for revoking their funds (as we don't currently do).
7830 let chanmon_cfgs = create_chanmon_cfgs(2);
7831 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7832 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7833 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7834 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7836 let per_commitment_secret;
7837 let next_per_commitment_point;
7839 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7840 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7841 let keys = guard.channel_by_id.get_mut(&channel_id).map(
7842 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
7843 ).flatten().unwrap().get_signer();
7845 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
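// Note: commitment numbers count down from 2^48 - 1 (the constant above), so the
// revocations below release the per-commitment secrets for indices 2^48 - 1, 2^48 - 2 and
// 2^48 - 3 in order, without gaps.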
7847 // Make signer believe we got a counterparty signature, so that it allows the revocation
7848 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7849 per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7851 // Must revoke without gaps
7852 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7853 keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7855 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7856 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7857 &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7860 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7861 &msgs::RevokeAndACK {
7863 per_commitment_secret,
7864 next_per_commitment_point,
7866 next_local_nonce: None,
7868 assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7869 check_added_monitors!(nodes[1], 1);
7870 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
7871 , [nodes[0].node.get_our_node_id()], 100000);
7875 fn test_bump_txn_sanitize_tracking_maps() {
7876 // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7877 // verify we clean them up right after ANTI_REORG_DELAY expires.
7879 let chanmon_cfgs = create_chanmon_cfgs(2);
7880 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7881 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7882 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7884 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7885 // Lock HTLC in both directions
7886 let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7887 let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7889 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7890 assert_eq!(revoked_local_txn[0].input.len(), 1);
7891 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7893 // Revoke local commitment tx
7894 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7896 // Broadcast set of revoked txn on A
7897 connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7898 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7899 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7901 mine_transaction(&nodes[0], &revoked_local_txn[0]);
7902 check_closed_broadcast!(nodes[0], true);
7903 check_added_monitors!(nodes[0], 1);
7904 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
7906 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7907 assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
7908 check_spends!(node_txn[0], revoked_local_txn[0]);
7909 check_spends!(node_txn[1], revoked_local_txn[0]);
7910 check_spends!(node_txn[2], revoked_local_txn[0]);
7911 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7915 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7916 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7918 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7919 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7920 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7925 fn test_channel_conf_timeout() {
7926 // Tests that, for inbound channels, we give up on them if the funding transaction does not
7927 // confirm within 2016 blocks, as recommended by BOLT 2.
7928 let chanmon_cfgs = create_chanmon_cfgs(2);
7929 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7930 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7931 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7933 let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7935 // The outbound node should wait forever for confirmation:
7936 // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7937 // copied here instead of directly referencing the constant.
7938 connect_blocks(&nodes[0], 2016);
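// As a sanity check on the magnitude: 2016 blocks is roughly two weeks at one block every
// ten minutes, which is the funding-confirmation timeout BOLT 2 suggests.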
7939 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7941 // The inbound node should fail the channel after exactly 2016 blocks
7942 connect_blocks(&nodes[1], 2015);
7943 check_added_monitors!(nodes[1], 0);
7944 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7946 connect_blocks(&nodes[1], 1);
7947 check_added_monitors!(nodes[1], 1);
7948 check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
7949 let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7950 assert_eq!(close_ev.len(), 1);
7952 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
7953 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7954 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7956 _ => panic!("Unexpected event"),
7961 fn test_override_channel_config() {
7962 let chanmon_cfgs = create_chanmon_cfgs(2);
7963 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7964 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7965 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7967 // Node0 initiates a channel to node1 using the override config.
7968 let mut override_config = UserConfig::default();
7969 override_config.channel_handshake_config.our_to_self_delay = 200;
7971 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
7973 // Assert the channel created by node0 is using the override config.
7974 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7975 assert_eq!(res.common_fields.channel_flags, 0);
7976 assert_eq!(res.common_fields.to_self_delay, 200);
7980 fn test_override_0msat_htlc_minimum() {
7981 let mut zero_config = UserConfig::default();
7982 zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7983 let chanmon_cfgs = create_chanmon_cfgs(2);
7984 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7985 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7986 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7988 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
7989 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7990 assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7992 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7993 let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7994 assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7998 fn test_channel_update_has_correct_htlc_maximum_msat() {
7999 // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
8000 // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
8001 // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
8002 // 90% of the `channel_value`.
8003 // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
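// Illustrative example for the channels opened below: each channel is 100_000 sats
// (100_000_000 msat), so a peer with a 30% inbound in-flight limit results in an advertised
// `htlc_maximum_msat` of 30_000_000 msat, while peers configured at 95% or 100% get clamped
// to the 90% capacity cap, i.e. 90_000_000 msat.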
8005 let mut config_30_percent = UserConfig::default();
8006 config_30_percent.channel_handshake_config.announced_channel = true;
8007 config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
8008 let mut config_50_percent = UserConfig::default();
8009 config_50_percent.channel_handshake_config.announced_channel = true;
8010 config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
8011 let mut config_95_percent = UserConfig::default();
8012 config_95_percent.channel_handshake_config.announced_channel = true;
8013 config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
8014 let mut config_100_percent = UserConfig::default();
8015 config_100_percent.channel_handshake_config.announced_channel = true;
8016 config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
8018 let chanmon_cfgs = create_chanmon_cfgs(4);
8019 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8020 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
8021 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8023 let channel_value_satoshis = 100000;
8024 let channel_value_msat = channel_value_satoshis * 1000;
8025 let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
8026 let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
8027 let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
8029 let (node_0_chan_update, node_1_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
8030 let (node_2_chan_update, node_3_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
8032 // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
8033 // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
8034 assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
8035 // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
8036 // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
8037 assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
8039 // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8040 // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%) exceeds 90% of the `channel_value`.
8042 assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8043 // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8044 // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%) exceeds 90% of the `channel_value`.
8046 assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8050 fn test_manually_accept_inbound_channel_request() {
8051 let mut manually_accept_conf = UserConfig::default();
8052 manually_accept_conf.manually_accept_inbound_channels = true;
8053 let chanmon_cfgs = create_chanmon_cfgs(2);
8054 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8055 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8056 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8058 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8059 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8061 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8063 // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8064 // accepting the inbound channel request.
8065 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8067 let events = nodes[1].node.get_and_clear_pending_events();
8069 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8070 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
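// The `23` above is an arbitrary `user_channel_id`; it is echoed back in the
// `ChannelClosed` event checked below, letting callers correlate the accepted channel with
// their own records.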
8072 _ => panic!("Unexpected event"),
8075 let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8076 assert_eq!(accept_msg_ev.len(), 1);
8078 match accept_msg_ev[0] {
8079 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8080 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8082 _ => panic!("Unexpected event"),
8085 nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8087 let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8088 assert_eq!(close_msg_ev.len(), 1);
8090 let events = nodes[1].node.get_and_clear_pending_events();
8092 Event::ChannelClosed { user_channel_id, .. } => {
8093 assert_eq!(user_channel_id, 23);
8095 _ => panic!("Unexpected event"),
8100 fn test_manually_reject_inbound_channel_request() {
8101 let mut manually_accept_conf = UserConfig::default();
8102 manually_accept_conf.manually_accept_inbound_channels = true;
8103 let chanmon_cfgs = create_chanmon_cfgs(2);
8104 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8105 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8106 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8108 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8109 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8111 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8113 // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8114 // rejecting the inbound channel request.
8115 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8117 let events = nodes[1].node.get_and_clear_pending_events();
8119 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8120 nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8122 _ => panic!("Unexpected event"),
8125 let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8126 assert_eq!(close_msg_ev.len(), 1);
8128 match close_msg_ev[0] {
8129 MessageSendEvent::HandleError { ref node_id, .. } => {
8130 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8132 _ => panic!("Unexpected event"),
8135 // There should be no more events to process, as the channel was never opened.
8136 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
8140 fn test_can_not_accept_inbound_channel_twice() {
8141 let mut manually_accept_conf = UserConfig::default();
8142 manually_accept_conf.manually_accept_inbound_channels = true;
8143 let chanmon_cfgs = create_chanmon_cfgs(2);
8144 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8145 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8146 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8148 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8149 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8151 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8153 // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8154 // accepting the inbound channel request.
8155 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8157 let events = nodes[1].node.get_and_clear_pending_events();
8159 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8160 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
8161 let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
8163 Err(APIError::APIMisuseError { err }) => {
8164 assert_eq!(err, "No such channel awaiting to be accepted.");
8166 Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
8167 Err(e) => panic!("Unexpected Error {:?}", e),
8170 _ => panic!("Unexpected event"),
8173 // Ensure that the channel wasn't closed after attempting to accept it twice.
8174 let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8175 assert_eq!(accept_msg_ev.len(), 1);
8177 match accept_msg_ev[0] {
8178 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8179 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8181 _ => panic!("Unexpected event"),
8186 fn test_can_not_accept_unknown_inbound_channel() {
8187 let chanmon_cfg = create_chanmon_cfgs(2);
8188 let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8189 let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8190 let nodes = create_network(2, &node_cfg, &node_chanmgr);
8192 let unknown_channel_id = ChannelId::new_zero();
8193 let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
8195 Err(APIError::APIMisuseError { err }) => {
8196 assert_eq!(err, "No such channel awaiting to be accepted.");
8198 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
8199 Err(e) => panic!("Unexpected Error: {:?}", e),
8204 fn test_onion_value_mpp_set_calculation() {
8205 // Test that we use the onion value `amt_to_forward` when
8206 // calculating whether we've reached the `total_msat` of an MPP
8207 // by having a routing node forward more than `amt_to_forward`
8208 // and checking that the receiving node doesn't generate
8209 // a PaymentClaimable event too early
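// Concretely: path 1 below carries an HTLC of 100_000 msat but its onion tells the
// recipient the sender intended only 99_000 msat, while path 2 carries 1_000 msat. The
// recipient should sum the onion-level amounts (99_000 + 1_000 = the 100_000 msat total)
// before surfacing PaymentClaimable, rather than treating the over-forwarded 100_000 msat
// on path 1 as already meeting `total_msat` on its own.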
let node_count = 4;
8211 let chanmon_cfgs = create_chanmon_cfgs(node_count);
8212 let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8213 let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8214 let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8216 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8217 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8218 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8219 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8221 let total_msat = 100_000;
8222 let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8223 let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8224 let sample_path = route.paths.pop().unwrap();
8226 let mut path_1 = sample_path.clone();
8227 path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8228 path_1.hops[0].short_channel_id = chan_1_id;
8229 path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8230 path_1.hops[1].short_channel_id = chan_3_id;
8231 path_1.hops[1].fee_msat = 100_000;
8232 route.paths.push(path_1);
8234 let mut path_2 = sample_path.clone();
8235 path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8236 path_2.hops[0].short_channel_id = chan_2_id;
8237 path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8238 path_2.hops[1].short_channel_id = chan_4_id;
8239 path_2.hops[1].fee_msat = 1_000;
8240 route.paths.push(path_2);
8243 let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8244 let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8245 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8246 nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8247 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8248 check_added_monitors!(nodes[0], expected_paths.len());
8250 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8251 assert_eq!(events.len(), expected_paths.len());
8254 let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8255 let mut payment_event = SendEvent::from_event(ev);
8256 let mut prev_node = &nodes[0];
8258 for (idx, &node) in expected_paths[0].iter().enumerate() {
8259 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8261 if idx == 0 { // routing node
8262 let session_priv = [3; 32];
8263 let height = nodes[0].best_block_info().1;
8264 let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8265 let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8266 let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
8267 let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8268 &recipient_onion_fields, height + 1, &None).unwrap();
8269 // Edit amt_to_forward to simulate the sender having set
8270 // the final amount and the routing node taking less fee
8271 if let msgs::OutboundOnionPayload::Receive {
8272 ref mut sender_intended_htlc_amt_msat, ..
8273 } = onion_payloads[1] {
8274 *sender_intended_htlc_amt_msat = 99_000;
8276 let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8277 payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8280 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8281 check_added_monitors!(node, 0);
8282 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8283 expect_pending_htlcs_forwardable!(node);
8286 let mut events_2 = node.node.get_and_clear_pending_msg_events();
8287 assert_eq!(events_2.len(), 1);
8288 check_added_monitors!(node, 1);
8289 payment_event = SendEvent::from_event(events_2.remove(0));
8290 assert_eq!(payment_event.msgs.len(), 1);
8292 let events_2 = node.node.get_and_clear_pending_events();
8293 assert!(events_2.is_empty());
8300 let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8301 pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8303 claim_payment_along_route(
8304 ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage)
8308 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8310 let routing_node_count = msat_amounts.len();
8311 let node_count = routing_node_count + 2;
8313 let chanmon_cfgs = create_chanmon_cfgs(node_count);
8314 let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8315 let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8316 let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
let src_idx = 0;
let dst_idx = 1;
8321 // Create channels for each amount
8322 let mut expected_paths = Vec::with_capacity(routing_node_count);
8323 let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8324 let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8325 for i in 0..routing_node_count {
8326 let routing_node = 2 + i;
8327 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8328 src_chan_ids.push(src_chan_id);
8329 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8330 dst_chan_ids.push(dst_chan_id);
8331 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8332 expected_paths.push(path);
8334 let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8336 // Create a route for each amount
8337 let example_amount = 100000;
8338 let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8339 let sample_path = route.paths.pop().unwrap();
8340 for i in 0..routing_node_count {
8341 let routing_node = 2 + i;
8342 let mut path = sample_path.clone();
8343 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8344 path.hops[0].short_channel_id = src_chan_ids[i];
8345 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8346 path.hops[1].short_channel_id = dst_chan_ids[i];
8347 path.hops[1].fee_msat = msat_amounts[i];
8348 route.paths.push(path);
8351 // Send payment with manually set total_msat
8352 let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8353 let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8354 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8355 nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8356 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8357 check_added_monitors!(nodes[src_idx], expected_paths.len());
8359 let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8360 assert_eq!(events.len(), expected_paths.len());
8361 let mut amount_received = 0;
8362 for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8363 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8365 let current_path_amount = msat_amounts[path_idx];
8366 amount_received += current_path_amount;
8367 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
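// E.g. for the (100_000, 101_000) / 200_000 case exercised below, the payment only becomes
// claimable once the second part arrives (100_000 < 200_000, but 201_000 >= 200_000), which
// is what `became_claimable_now` captures for `pass_along_path`.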
8368 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8371 claim_payment_along_route(
8372 ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage)
8377 fn test_overshoot_mpp() {
8378 do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8379 do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8383 fn test_simple_mpp() {
8384 // Simple test of sending a multi-path payment.
8385 let chanmon_cfgs = create_chanmon_cfgs(4);
8386 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8387 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8388 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8390 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8391 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8392 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8393 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8395 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8396 let path = route.paths[0].clone();
8397 route.paths.push(path);
8398 route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8399 route.paths[0].hops[0].short_channel_id = chan_1_id;
8400 route.paths[0].hops[1].short_channel_id = chan_3_id;
8401 route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8402 route.paths[1].hops[0].short_channel_id = chan_2_id;
8403 route.paths[1].hops[1].short_channel_id = chan_4_id;
8404 send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8405 claim_payment_along_route(
8406 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage)
8411 fn test_preimage_storage() {
8412 // Simple test of payment preimage storage, allowing payments to be claimed with no client-side storage
8413 let chanmon_cfgs = create_chanmon_cfgs(2);
8414 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8415 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8416 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8418 create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8421 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
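// Roughly speaking, `create_inbound_payment` derives the preimage deterministically from
// the node's key material and the payment metadata rather than storing it, so no
// per-payment state is kept client-side; the preimage is simply read back out of the
// PaymentClaimable purpose below.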
8422 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8423 nodes[0].node.send_payment_with_route(&route, payment_hash,
8424 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8425 check_added_monitors!(nodes[0], 1);
8426 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8427 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8428 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8429 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8431 // Note that after leaving the above scope we have no knowledge of any arguments or return
8432 // values from previous calls.
8433 expect_pending_htlcs_forwardable!(nodes[1]);
8434 let events = nodes[1].node.get_and_clear_pending_events();
8435 assert_eq!(events.len(), 1);
8437 Event::PaymentClaimable { ref purpose, .. } => {
8439 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
8440 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8442 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
8445 _ => panic!("Unexpected event"),
8450 fn test_bad_secret_hash() {
8451 // Simple test of unregistered payment hash/invalid payment secret handling
8452 let chanmon_cfgs = create_chanmon_cfgs(2);
8453 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8454 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8455 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8457 create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8459 let random_payment_hash = PaymentHash([42; 32]);
8460 let random_payment_secret = PaymentSecret([43; 32]);
8461 let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8462 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8464 // All the below cases should end up being handled exactly identically, so we macro the
8465 // resulting events.
8466 macro_rules! handle_unknown_invalid_payment_data {
8467 ($payment_hash: expr) => {
8468 check_added_monitors!(nodes[0], 1);
8469 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8470 let payment_event = SendEvent::from_event(events.pop().unwrap());
8471 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8472 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8474 // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8475 // again to process the pending backwards-failure of the HTLC
8476 expect_pending_htlcs_forwardable!(nodes[1]);
8477 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8478 check_added_monitors!(nodes[1], 1);
8480 // We should fail the payment back
8481 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8482 match events.pop().unwrap() {
8483 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8484 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8485 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8487 _ => panic!("Unexpected event"),
8492 let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8493 // Error data is the HTLC value (100,000) and current block height
8494 let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
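// Layout of the failure data checked below: the first eight bytes are the failed HTLC's
// amount as a big-endian u64 (100_000 msat = 0x0001_86a0) and the last four bytes are the
// current block height as a big-endian u32 (CHAN_CONFIRM_DEPTH in these tests).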
8496 // Send a payment with the right payment hash but the wrong payment secret
8497 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8498 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8499 handle_unknown_invalid_payment_data!(our_payment_hash);
8500 expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8502 // Send a payment with a random payment hash, but the right payment secret
8503 nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8504 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8505 handle_unknown_invalid_payment_data!(random_payment_hash);
8506 expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8508 // Send a payment with a random payment hash and random payment secret
8509 nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8510 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8511 handle_unknown_invalid_payment_data!(random_payment_hash);
8512 expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8516 fn test_update_err_monitor_lockdown() {
8517 // Our monitor will lock updates of the local commitment transaction if a broadcast condition
8518 // has been fulfilled (either a force-close from Channel or a block height requiring an HTLC-
8519 // timeout). Trying to update the monitor after lockdown should return a ChannelMonitorUpdateStatus
8522 // This scenario may happen in a watchtower setup, where the watchtower processes a block height
8523 // triggering a timeout while a slow-block-processing ChannelManager receives a locally signed
8524 // commitment at the same time.
8526 let chanmon_cfgs = create_chanmon_cfgs(2);
8527 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8528 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8529 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8531 // Create some initial channel
8532 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8533 let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8535 // Rebalance the network so HTLCs can be sent in both directions
8536 send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8538 // Route a HTLC from node 0 to node 1 (but don't settle)
8539 let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8541 // Copy ChainMonitor to simulate a watchtower and update node 0's block height until its ChannelMonitor times out the HTLC onchain
8542 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8543 let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8544 let persister = test_utils::TestPersister::new();
8547 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8548 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8549 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8550 assert!(new_monitor == *monitor);
8553 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8554 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8557 let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8558 // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8559 // transaction lock time requirements here.
8560 chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8561 watchtower.chain_monitor.block_connected(&block, 200);
8563 // Try to update ChannelMonitor
8564 nodes[1].node.claim_funds(preimage);
8565 check_added_monitors!(nodes[1], 1);
8566 expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8568 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8569 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8570 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8572 let mut node_0_per_peer_lock;
8573 let mut node_0_peer_state_lock;
8574 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8575 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8576 assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8577 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8578 } else { assert!(false); }
8583 // Our local monitor is in-sync and hasn't yet processed the timeout
8584 check_added_monitors!(nodes[0], 1);
8585 let events = nodes[0].node.get_and_clear_pending_events();
8586 assert_eq!(events.len(), 1);
8590 fn test_concurrent_monitor_claim() {
8591 // Watchtower A receives a block and broadcasts state N, then the channel moves to state N+1,
8592 // which is sent to both watchtowers. Bob accepts N+1, then receives a block and broadcasts
8593 // the latest state N+1, while Alice rejects N+1. Since Bob has already broadcast it,
8594 // state N+1 confirms. Alice claims the output from state N+1.
8596 let chanmon_cfgs = create_chanmon_cfgs(2);
8597 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8598 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8599 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8601 // Create some initial channel
8602 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8603 let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8605 // Rebalance the network so HTLCs can be sent in both directions
8606 send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8608 // Route a HTLC from node 0 to node 1 (but don't settle)
8609 route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8611 // Copy ChainMonitor to simulate watchtower Alice and update the block height until her ChannelMonitor times out the HTLC onchain
8612 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8613 let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8614 let persister = test_utils::TestPersister::new();
8615 let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8616 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8618 let watchtower_alice = {
8620 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8621 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8622 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8623 assert!(new_monitor == *monitor);
8626 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8627 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8630 let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8631 // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8632 // requirements here.
8633 const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
8634 alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8635 watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8637 // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8639 let mut txn = alice_broadcaster.txn_broadcast();
8640 assert_eq!(txn.len(), 2);
8641 check_spends!(txn[0], chan_1.3);
8642 check_spends!(txn[1], txn[0]);
8645 // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8646 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8647 let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8648 let persister = test_utils::TestPersister::new();
8649 let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8650 let watchtower_bob = {
8652 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8653 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8654 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8655 assert!(new_monitor == *monitor);
8658 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8659 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8662 watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8664 // Route another payment to generate another update while the previous HTLC is still pending
8665 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8666 nodes[1].node.send_payment_with_route(&route, payment_hash,
8667 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8668 check_added_monitors!(nodes[1], 1);
8670 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8671 assert_eq!(updates.update_add_htlcs.len(), 1);
8672 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8674 let mut node_0_per_peer_lock;
8675 let mut node_0_peer_state_lock;
8676 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8677 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8678 // Watchtower Alice should already have seen the block and reject the update
8679 assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8680 assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8681 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8682 } else { assert!(false); }
8687 // Our local monitor is in-sync and hasn't yet processed the timeout
8688 check_added_monitors!(nodes[0], 1);
8690 // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8691 watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8693 // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8696 let mut txn = bob_broadcaster.txn_broadcast();
8697 assert_eq!(txn.len(), 2);
8698 bob_state_y = txn.remove(0);
8701 // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
8702 let height = HTLC_TIMEOUT_BROADCAST + 1;
8703 connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8704 check_closed_broadcast(&nodes[0], 1, true);
8705 check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
8706 [nodes[1].node.get_our_node_id()], 100000);
8707 watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8708 check_added_monitors(&nodes[0], 1);
8710 let htlc_txn = alice_broadcaster.txn_broadcast();
8711 assert_eq!(htlc_txn.len(), 1);
8712 check_spends!(htlc_txn[0], bob_state_y);
8717 fn test_pre_lockin_no_chan_closed_update() {
8718 // Test that if a peer closes a channel in response to a funding_created message we don't
8719 // generate a channel update (as the channel cannot appear on chain without a funding_signed message).
8722 // Doing so would imply a channel monitor update before the initial channel monitor
8723 // registration, violating our API guarantees.
8725 // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8726 // then opening a second channel with the same funding output as the first (which is not
8727 // rejected because the first channel does not exist in the ChannelManager) and closing it
8728 // before receiving funding_signed.
8729 let chanmon_cfgs = create_chanmon_cfgs(2);
8730 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8731 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8732 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8734 // Create an initial channel
8735 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8736 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8737 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8738 let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8739 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8741 // Move the first channel through the funding flow...
8742 let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8744 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8745 check_added_monitors!(nodes[0], 0);
8747 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8748 let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
8749 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8750 assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8751 check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
8752 [nodes[1].node.get_our_node_id()], 100000);
8756 fn test_htlc_no_detection() {
8757 // This test is a mutation to underscore the detection logic bug we had
8758 // before #653. The HTLC value routed is above the remaining balance, thus
8759 // inverting the HTLC and `to_remote` outputs. The HTLC will come second and
8760 // wouldn't have been seen by the pre-#653 detection as we were enumerate()'ing
8761 // on a watched outputs vector (Vec<TxOut>), thus implicitly relying on
8762 // output order detection for correct spending-children filtering.
8764 let chanmon_cfgs = create_chanmon_cfgs(2);
8765 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8766 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8767 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8769 // Create some initial channels
8770 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8772 send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8773 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8774 let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8775 assert_eq!(local_txn[0].input.len(), 1);
8776 assert_eq!(local_txn[0].output.len(), 3);
8777 check_spends!(local_txn[0], chan_1.3);
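// The commitment has three outputs: to_local, to_remote, and the 2_000_000 msat HTLC. As
// noted above, the HTLC value exceeds the remaining counterparty balance, inverting the
// usual output ordering -- the case the pre-#653 detection logic mishandled.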
8779 // Time out the HTLC on A's chain so it can generate an HTLC-Timeout tx
8780 let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8781 connect_block(&nodes[0], &block);
8782 // We deliberately connect the local tx twice, as this would provoke a failure when running
8783 // this test before the #653 fix.
8784 chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8785 check_closed_broadcast!(nodes[0], true);
8786 check_added_monitors!(nodes[0], 1);
8787 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
8788 connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8790 let htlc_timeout = {
8791 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8792 assert_eq!(node_txn.len(), 1);
8793 assert_eq!(node_txn[0].input.len(), 1);
8794 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8795 check_spends!(node_txn[0], local_txn[0]);
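// The broadcast transaction spends the confirmed commitment and its last witness element
// has the length of an offered-HTLC script (asserted above), i.e. it is the HTLC-timeout
// we expected.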
8799 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8800 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8801 expect_payment_failed!(nodes[0], our_payment_hash, false);
8804 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8805 // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8806 // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8807 // Carol, Alice would be the upstream node, and Carol the downstream.)
8809 // Steps of the test:
8810 // 1) Alice sends a HTLC to Carol through Bob.
8811 // 2) Carol doesn't settle the HTLC.
8812 // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
8813 // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8814 // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
8815 // but can't be claimed as Bob doesn't yet know the preimage.
8816 // 5) Carol releases the preimage to Bob off-chain.
8817 // 6) Bob claims the offered output on the broadcasted commitment.
8818 let chanmon_cfgs = create_chanmon_cfgs(3);
8819 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8820 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8821 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8823 // Create some initial channels
8824 let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8825 create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8827 // Steps (1) and (2):
8828 // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8829 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8831 // Check that Alice's commitment transaction now contains an output for this HTLC.
8832 let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8833 check_spends!(alice_txn[0], chan_ab.3);
8834 assert_eq!(alice_txn[0].output.len(), 2);
8835 check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8836 assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8837 assert_eq!(alice_txn.len(), 2);
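// alice_txn[0] is Alice's commitment transaction; alice_txn[1] is the matching (not yet
// broadcastable) HTLC-timeout, whose offered-HTLC witness script length is checked above.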
8839 // Steps (3) and (4):
8840 // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8841 // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8842 let mut force_closing_node = 0; // Alice force-closes
8843 let mut counterparty_node = 1; // Bob if Alice force-closes
8846 if !broadcast_alice {
8847 force_closing_node = 1;
8848 counterparty_node = 0;
8850 nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8851 check_closed_broadcast!(nodes[force_closing_node], true);
8852 check_added_monitors!(nodes[force_closing_node], 1);
8853 check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
8854 if go_onchain_before_fulfill {
8855 let txn_to_broadcast = match broadcast_alice {
8856 true => alice_txn.clone(),
8857 false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8859 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8860 if broadcast_alice {
8861 check_closed_broadcast!(nodes[1], true);
8862 check_added_monitors!(nodes[1], 1);
8863 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8868 // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8869 // process of removing the HTLC from their commitment transactions.
8870 nodes[2].node.claim_funds(payment_preimage);
8871 check_added_monitors!(nodes[2], 1);
8872 expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8874 let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8875 assert!(carol_updates.update_add_htlcs.is_empty());
8876 assert!(carol_updates.update_fail_htlcs.is_empty());
8877 assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8878 assert!(carol_updates.update_fee.is_none());
8879 assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8881 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8882 let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
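// When the commitment already went on-chain, Bob's forwarded-payment event doesn't report
// the fee he earned (hence `None`); otherwise he expects his 1000 msat routing fee.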
8883 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
8884 // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8885 if !go_onchain_before_fulfill && broadcast_alice {
8886 let events = nodes[1].node.get_and_clear_pending_msg_events();
8887 assert_eq!(events.len(), 1);
8889 MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8890 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8892 _ => panic!("Unexpected event"),
8895 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8896 // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor update
8897 // for Carol<->Bob's updated commitment transaction info.
8898 check_added_monitors!(nodes[1], 2);
8900 let events = nodes[1].node.get_and_clear_pending_msg_events();
8901 assert_eq!(events.len(), 2);
8902 let bob_revocation = match events[0] {
8903 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8904 assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8907 _ => panic!("Unexpected event"),
8909 let bob_updates = match events[1] {
8910 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
8911 assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8914 _ => panic!("Unexpected event"),
8917 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8918 check_added_monitors!(nodes[2], 1);
8919 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8920 check_added_monitors!(nodes[2], 1);
8922 let events = nodes[2].node.get_and_clear_pending_msg_events();
8923 assert_eq!(events.len(), 1);
8924 let carol_revocation = match events[0] {
8925 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8926 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
8929 _ => panic!("Unexpected event"),
8931 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8932 check_added_monitors!(nodes[1], 1);
8934 // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8935 // here's where we put said channel's commitment tx on-chain.
8936 let mut txn_to_broadcast = alice_txn.clone();
8937 if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8938 if !go_onchain_before_fulfill {
8939 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8940 // If Bob was the one to force-close, he will have already passed these checks earlier.
8941 if broadcast_alice {
8942 check_closed_broadcast!(nodes[1], true);
8943 check_added_monitors!(nodes[1], 1);
8944 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8946 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8947 if broadcast_alice {
8948 assert_eq!(bob_txn.len(), 1);
8949 check_spends!(bob_txn[0], txn_to_broadcast[0]);
8951 if nodes[1].connect_style.borrow().updates_best_block_first() {
8952 assert_eq!(bob_txn.len(), 3);
8953 assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
8955 assert_eq!(bob_txn.len(), 2);
8957 check_spends!(bob_txn[0], chan_ab.3);
8962 // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8963 // broadcasted commitment transaction.
8965 let script_weight = match broadcast_alice {
8966 true => OFFERED_HTLC_SCRIPT_WEIGHT,
8967 false => ACCEPTED_HTLC_SCRIPT_WEIGHT
8969 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8970 // Bob force-closed and broadcasts the commitment transaction along with a
8971 // HTLC-output-claiming transaction.
8972 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8973 if broadcast_alice {
8974 assert_eq!(bob_txn.len(), 1);
8975 check_spends!(bob_txn[0], txn_to_broadcast[0]);
8976 assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
8978 assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
8979 let htlc_tx = bob_txn.pop().unwrap();
8980 check_spends!(htlc_tx, txn_to_broadcast[0]);
8981 assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
8987 fn test_onchain_htlc_settlement_after_close() {
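// Exercise all four combinations of (broadcast_alice, go_onchain_before_fulfill).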
8988 do_test_onchain_htlc_settlement_after_close(true, true);
8989 do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
8990 do_test_onchain_htlc_settlement_after_close(true, false);
8991 do_test_onchain_htlc_settlement_after_close(false, false);
8995 fn test_duplicate_temporary_channel_id_from_different_peers() {
8996 // Tests that we can accept two different `OpenChannel` requests with the same
8997 // `temporary_channel_id`, as long as they are from different peers.
8998 let chanmon_cfgs = create_chanmon_cfgs(3);
8999 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9000 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9001 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9003 // Create a first channel
9004 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9005 let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
9007 // Create a second channel
9008 nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
9009 let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
9011 // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
9012 // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
9013 open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
9015 // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
9016 // `temporary_channel_id` as they are from different peers.
9017 nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
9019 let events = nodes[0].node.get_and_clear_pending_msg_events();
9020 assert_eq!(events.len(), 1);
9022 MessageSendEvent::SendAcceptChannel { node_id, msg } => {
9023 assert_eq!(node_id, &nodes[1].node.get_our_node_id());
9024 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
9026 _ => panic!("Unexpected event"),
9030 nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
9032 let events = nodes[0].node.get_and_clear_pending_msg_events();
9033 assert_eq!(events.len(), 1);
9035 MessageSendEvent::SendAcceptChannel { node_id, msg } => {
9036 assert_eq!(node_id, &nodes[2].node.get_our_node_id());
9037 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
9039 _ => panic!("Unexpected event"),
9045 fn test_peer_funding_sidechannel() {
9046 // Test that if a peer somehow learns which txid we'll use for our channel funding before we
9047 // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously
9048 // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
9049 // the txid and panicked if the peer tried to open a redundant channel to us with the same
9050 // funding outpoint.
9052 // While this assumption is generally safe, some users may have out-of-band protocols where
9053 // they notify their LSP about a funding outpoint first, or this may be violated in the future
9054 // with collaborative transaction construction protocols, i.e. dual-funding.
9055 let chanmon_cfgs = create_chanmon_cfgs(3);
9056 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9057 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9058 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9060 let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9061 let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
9063 let (_, tx, funding_output) =
9064 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9066 let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
9067 assert_eq!(cs_funding_events.len(), 1);
9068 match cs_funding_events[0] {
9069 Event::FundingGenerationReady { .. } => {}
9070 _ => panic!("Unexpected event {:?}", cs_funding_events),
9073 nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
9074 let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
9075 nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9076 get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
9077 expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
9078 check_added_monitors!(nodes[0], 1);
9080 let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
9081 let err_msg = format!("{:?}", res.unwrap_err());
9082 assert!(err_msg.contains("An existing channel using outpoint "));
9083 assert!(err_msg.contains(" is open with peer"));
9084 // Even though the last funding_transaction_generated errored, it still generated a
9085 // SendFundingCreated. However, when the peer responds with a funding_signed it will send the
9086 // appropriate error message.
9087 let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9088 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
9089 check_added_monitors!(nodes[1], 1);
9090 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9091 let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
9092 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
9094 let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9095 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9096 get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
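// nodes[0] answers the unexpected funding_signed with an error for the already-closed
// channel rather than panicking, which is the regression this test guards against.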
9100 fn test_duplicate_conflicting_funding_from_second_peer() {
9101 // Test that if a user tries to fund a channel with a funding outpoint they'd previously used
9102 // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
9103 // don't regress in the fuzzer, as such funding getting past our outpoint-matching checks
9104 // implies the user (and our counterparty) has reused cryptographic keys across channels, which
9105 // we require users not to do.
9106 let chanmon_cfgs = create_chanmon_cfgs(4);
9107 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9108 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9109 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9111 let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9113 let (_, tx, funding_output) =
9114 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9116 // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
9117 // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
9118 let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
9119 let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
9120 nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
9122 nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9124 let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9125 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9126 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9127 check_added_monitors!(nodes[1], 1);
9128 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9130 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9131 // At this point, the channel should be closed, after having generated one monitor write (the
9132 // watch_channel call which failed), but zero monitor updates.
9133 check_added_monitors!(nodes[0], 1);
9134 get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9135 let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
9136 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
9140 fn test_duplicate_funding_err_in_funding() {
9141 // Test that if we have a live channel with one peer, then another peer comes along and tries
9142 // to create a second channel with the same txid we'll fail and not overwrite the
9143 // outpoint_to_peer map in `ChannelManager`.
9145 // This was previously broken.
9146 let chanmon_cfgs = create_chanmon_cfgs(3);
9147 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9148 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9149 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9151 let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
9152 let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
9153 assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
9155 nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
9156 let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9157 let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
9158 open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
9159 nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
9160 let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
9161 accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
9162 nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
9164 // Now that we have a second channel with the same funding txo, send a bogus funding message
9165 // and let nodes[1] remove the inbound channel.
9166 let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
9168 nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
9170 let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9171 funding_created_msg.temporary_channel_id = real_channel_id;
9172 // Make the signature invalid by changing the funding output
9173 funding_created_msg.funding_output_index += 10;
9174 nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9175 get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
9176 let err = "Invalid funding_created signature from peer".to_owned();
9177 let reason = ClosureReason::ProcessingError { err };
9178 let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
9179 check_closed_events(&nodes[1], &[expected_closing]);
9182 *nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
9183 nodes[0].node.get_our_node_id()
9188 fn test_duplicate_chan_id() {
9189 // Test that if a given peer tries to open a channel with the same channel_id as one that is
9190 // already open we reject it and keep the old channel.
9192 // Previously, full_stack_target managed to figure out that if you tried to open two channels
9193 // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
9194 // the existing channel when we detect the duplicate new channel, screwing up our monitor
9195 // updating logic for the existing channel.
9196 let chanmon_cfgs = create_chanmon_cfgs(2);
9197 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9198 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9199 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9201 // Create an initial channel
9202 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9203 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9204 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9205 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9207 // Try to create a second channel with the same temporary_channel_id as the first and check
9208 // that it is rejected.
9209 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9211 let events = nodes[1].node.get_and_clear_pending_msg_events();
9212 assert_eq!(events.len(), 1);
9214 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9215 // Technically, at this point, nodes[1] would be justified in thinking both the
9216 // first (valid) and second (invalid) channels are closed, given they both have
9217 // the same non-temporary channel_id. However, currently we do not, so we just
9218 // move forward with it.
9219 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9220 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9222 _ => panic!("Unexpected event"),
9226 // Move the first channel through the funding flow...
9227 let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9229 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9230 check_added_monitors!(nodes[0], 0);
9232 let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9233 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9235 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9236 assert_eq!(added_monitors.len(), 1);
9237 assert_eq!(added_monitors[0].0, funding_output);
9238 added_monitors.clear();
9240 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9242 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9244 let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
9245 let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
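// From here on the first channel is identified by its funding-outpoint-based channel_id,
// which the duplicate-channel checks below key off.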
9247 // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a temporary one).
9250 // First try to open a second channel with a temporary channel id equal to the txid-based one.
9251 // Technically this is allowed by the spec, but we don't support it and there's little reason
9252 // to. Still, it shouldn't cause any other issues.
9253 open_chan_msg.common_fields.temporary_channel_id = channel_id;
9254 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9256 let events = nodes[1].node.get_and_clear_pending_msg_events();
9257 assert_eq!(events.len(), 1);
9259 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9260 // Technically, at this point, nodes[1] would be justified in thinking both
9261 // channels are closed, but currently we do not, so we just move forward with it.
9262 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9263 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9265 _ => panic!("Unexpected event"),
9269 // Now try to create a second channel which has a duplicate funding output.
9270 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9271 let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9272 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
9273 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9274 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
9276 let funding_created = {
9277 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9278 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9279 // Once we call `get_funding_created` the channel has a channel_id duplicating that of
9280 // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
9281 // try to create another channel. Instead, we drop the channel entirely here (leaving the
9282 // ChannelManager in a possibly nonsensical state).
9283 match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
9284 ChannelPhase::UnfundedOutboundV1(mut chan) => {
9285 let logger = test_utils::TestLogger::new();
9286 chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
9288 _ => panic!("Unexpected ChannelPhase variant"),
9291 check_added_monitors!(nodes[0], 0);
9292 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9293 // At this point we'll look up if the channel_id is present and immediately fail the channel
9294 // without trying to persist the `ChannelMonitor`.
9295 check_added_monitors!(nodes[1], 0);
9297 check_closed_events(&nodes[1], &[
9298 ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
9299 err: "Already had channel with the new channel_id".to_owned()
9303 // ...still, nodes[1] will reject the duplicate channel.
9305 let events = nodes[1].node.get_and_clear_pending_msg_events();
9306 assert_eq!(events.len(), 1);
9308 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9309 // Technically, at this point, nodes[1] would be justified in thinking both
9310 // channels are closed, but currently we do not, so we just move forward with it.
9311 assert_eq!(msg.channel_id, channel_id);
9312 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9314 _ => panic!("Unexpected event"),
9318 // Finally, finish creating the original channel and send a payment over it to make sure
9319 // everything is functional.
9320 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9322 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9323 assert_eq!(added_monitors.len(), 1);
9324 assert_eq!(added_monitors[0].0, funding_output);
9325 added_monitors.clear();
9327 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9329 let events_4 = nodes[0].node.get_and_clear_pending_events();
9330 assert_eq!(events_4.len(), 0);
9331 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9332 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9334 let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9335 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9336 update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9338 send_payment(&nodes[0], &[&nodes[1]], 8000000);
9342 fn test_error_chans_closed() {
9343 // Test that we properly handle error messages, closing appropriate channels.
9345 // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
9346 // peer. The "real" fix for that is to index channels with peers_ids, however in the mean time
9347 // we can test various edge cases around it to ensure we don't regress.
9348 let chanmon_cfgs = create_chanmon_cfgs(3);
9349 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9350 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9351 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9353 // Create some initial channels
9354 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9355 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9356 let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
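// nodes[0] now has two channels with nodes[1] (chan_1, chan_2) and one with nodes[2]
// (chan_3), which the usable-channel counts below reflect.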
9358 assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9359 assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9360 assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9362 // An error referencing a channel we have with a different peer has no effect
9363 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9364 assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9366 // Closing one channel doesn't impact others
9367 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9368 check_added_monitors!(nodes[0], 1);
9369 check_closed_broadcast!(nodes[0], false);
9370 check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9371 [nodes[1].node.get_our_node_id()], 100000);
9372 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9373 assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9374 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9375 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
9377 // A null channel ID should close all channels with the sending peer
9378 let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9379 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
9380 check_added_monitors!(nodes[0], 2);
9381 check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9382 [nodes[1].node.get_our_node_id(); 2], 100000);
9383 let events = nodes[0].node.get_and_clear_pending_msg_events();
9384 assert_eq!(events.len(), 2);
9386 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9387 assert_eq!(msg.contents.flags & 2, 2);
9389 _ => panic!("Unexpected event"),
9392 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9393 assert_eq!(msg.contents.flags & 2, 2);
9395 _ => panic!("Unexpected event"),
9397 // Note that at this point users of a standard PeerHandler will end up calling
9398 // peer_disconnected.
9399 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9400 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9402 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9403 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9404 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9408 fn test_invalid_funding_tx() {
9409 // Test that we properly handle invalid funding transactions sent to us from a peer.
9411 // Previously, all other major lightning implementations had failed to properly sanitize
9412 // funding transactions from their counterparties, leading to a multi-implementation critical
9413 // security vulnerability (though we always sanitized properly, we've previously had
9414 // un-released crashes in the sanitization process).
9416 // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9417 // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9418 // gave up on it. We test this here by generating such a transaction.
9419 let chanmon_cfgs = create_chanmon_cfgs(2);
9420 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9421 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9422 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9424 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
9425 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9426 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9428 let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9430 // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
9431 // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
9432 // a panic as we'd try to extract a 32 byte preimage from a witness element without checking its length.
9434 let mut wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9435 let wit_program_script: ScriptBuf = wit_program.into();
9436 for output in tx.output.iter_mut() {
9437 // Make the confirmed funding transaction have a bogus script_pubkey
9438 output.script_pubkey = ScriptBuf::new_p2wsh(&wit_program_script.wscript_hash());
9441 nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9442 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9443 check_added_monitors!(nodes[1], 1);
9444 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9446 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9447 check_added_monitors!(nodes[0], 1);
9448 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9450 let events_1 = nodes[0].node.get_and_clear_pending_events();
9451 assert_eq!(events_1.len(), 0);
9453 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9454 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9455 nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9457 let expected_err = "funding tx had wrong script/value or output index";
9458 confirm_transaction_at(&nodes[1], &tx, 1);
9459 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
9460 [nodes[0].node.get_our_node_id()], 100000);
9461 check_added_monitors!(nodes[1], 1);
9462 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9463 assert_eq!(events_2.len(), 1);
9464 if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9465 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9466 if let msgs::ErrorAction::DisconnectPeer { msg } = action {
9467 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
9468 } else { panic!(); }
9469 } else { panic!(); }
9470 assert_eq!(nodes[1].node.list_channels().len(), 0);
9472 // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
9473 // long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
9474 // as it's not 32 bytes long.
9475 let mut spend_tx = Transaction {
9476 version: Version::TWO, lock_time: LockTime::ZERO,
9477 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
9478 previous_output: BitcoinOutPoint {
9482 script_sig: ScriptBuf::new(),
9483 sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
9484 witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
9486 output: vec![TxOut {
9487 value: Amount::from_sat(1000),
9488 script_pubkey: ScriptBuf::new(),
9491 check_spends!(spend_tx, tx);
9492 mine_transaction(&nodes[1], &spend_tx);
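// Success here simply means the ChannelMonitor processed the bogus-witness spend without
// panicking while trying to extract a preimage from it.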
9496 fn test_coinbase_funding_tx() {
9497 // Miners are able to fund channels directly from coinbase transactions, however
9498 // by consensus rules, outputs of a coinbase transaction are encumbered by a 100
9499 // block maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
9500 // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
9502 // Note that 0conf channels with coinbase funding transactions are unaffected and are
9503 // immediately operational after opening.
9504 let chanmon_cfgs = create_chanmon_cfgs(2);
9505 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9506 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9507 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9509 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9510 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9512 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9513 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9515 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9517 // Create the coinbase funding transaction.
9518 let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9520 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9521 check_added_monitors!(nodes[0], 0);
9522 let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9524 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9525 check_added_monitors!(nodes[1], 1);
9526 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9528 let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9530 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9531 check_added_monitors!(nodes[0], 1);
9533 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9534 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
9536 // Starting at height 0, we "confirm" the coinbase at height 1.
9537 confirm_transaction_at(&nodes[0], &tx, 1);
9538 // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
9539 connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
9540 // Check that we have no pending message events (we have not queued a `channel_ready` yet).
9541 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
9542 // Now connect one more block which results in 100 confirmations of the coinbase transaction.
9543 connect_blocks(&nodes[0], 1);
9544 // There should now be a `channel_ready` which can be handled.
9545 let _ = &nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
9547 confirm_transaction_at(&nodes[1], &tx, 1);
9548 connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
9549 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
9550 connect_blocks(&nodes[1], 1);
9551 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
9552 create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
9555 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9556 // In the first version of the chain::Confirm interface, after a refactor was made to not
9557 // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9558 // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9559 // `best_block_updated` is at height N, and a transaction output which we wish to spend at
9560 // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
9561 // spending transaction until height N+1 (or greater). This was due to the way
9562 // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9563 // spending transaction at the height the input transaction was confirmed at, not whether we
9564 // should broadcast a spending transaction at the current height.
9565 // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9566 // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9567 // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
9568 // until we learned about an additional block.
9570 // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9571 // aren't broadcasting transactions too early (ie not broadcasting them at all).
9572 let chanmon_cfgs = create_chanmon_cfgs(3);
9573 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9574 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9575 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9576 *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9578 create_announced_chan_between_nodes(&nodes, 0, 1);
9579 let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9580 let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9581 nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9582 nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9584 nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9585 check_closed_broadcast!(nodes[1], true);
9586 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
9587 check_added_monitors!(nodes[1], 1);
9588 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9589 assert_eq!(node_txn.len(), 1);
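// The lone broadcast transaction is Bob's commitment transaction for the Bob<->Carol
// channel, which we confirm at `conf_height` below.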
9591 let conf_height = nodes[1].best_block_info().1;
9592 if !test_height_before_timelock {
9593 connect_blocks(&nodes[1], 24 * 6);
9595 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9596 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
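// Note the commitment transaction is confirmed at `conf_height`, which (when
// !test_height_before_timelock) is well behind the current best block, so the monitor must
// notice that the relevant timelocks have already expired.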
9597 if test_height_before_timelock {
9598 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9599 // generate any events or broadcast any transactions
9600 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9601 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9603 // We should broadcast an HTLC transaction spending our funding transaction first
9604 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9605 assert_eq!(spending_txn.len(), 2);
9606 let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
9611 check_spends!(htlc_tx, node_txn[0]);
9612 // We should also generate a SpendableOutputs event with the to_self output (as its
9614 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9615 assert_eq!(descriptor_spend_txn.len(), 1);
9617 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9618 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9619 // additional block built on top of the current chain.
9620 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9621 &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
9622 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9623 check_added_monitors!(nodes[1], 1);
9625 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9626 assert!(updates.update_add_htlcs.is_empty());
9627 assert!(updates.update_fulfill_htlcs.is_empty());
9628 assert_eq!(updates.update_fail_htlcs.len(), 1);
9629 assert!(updates.update_fail_malformed_htlcs.is_empty());
9630 assert!(updates.update_fee.is_none());
9631 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9632 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9633 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
9638 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9639 do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9640 do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9643 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9644 let chanmon_cfgs = create_chanmon_cfgs(2);
9645 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9646 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9647 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9649 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9651 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9652 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
9653 let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
9655 let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9658 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9659 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9660 check_added_monitors!(nodes[0], 1);
9661 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9662 assert_eq!(events.len(), 1);
9663 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9664 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9665 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9667 expect_pending_htlcs_forwardable!(nodes[1]);
9668 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
9671 // Note that we use a different PaymentId here to allow us to duplicatively pay
9672 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9673 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9674 check_added_monitors!(nodes[0], 1);
9675 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9676 assert_eq!(events.len(), 1);
9677 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9678 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9679 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9680 // At this point, nodes[1] would notice it has too much value for the payment. It will
9681 // assume the second is a privacy attack (no longer particularly relevant
9682 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9683 // the first HTLC delivered above.
9686 expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9687 nodes[1].node.process_pending_htlc_forwards();
9689 if test_for_second_fail_panic {
9690 // Now we go fail back the first HTLC from the user end.
9691 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9693 let expected_destinations = vec![
9694 HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9695 HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9697 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9698 nodes[1].node.process_pending_htlc_forwards();
9700 check_added_monitors!(nodes[1], 1);
9701 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9702 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9704 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9705 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9706 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9708 let failure_events = nodes[0].node.get_and_clear_pending_events();
9709 assert_eq!(failure_events.len(), 4);
9710 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9711 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9712 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
9713 if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
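// Both sends fail as separate payments (two PaymentPathFailed/PaymentFailed pairs) because
// they used distinct PaymentIds.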
9715 // Let the second HTLC fail and claim the first
9716 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9717 nodes[1].node.process_pending_htlc_forwards();
9719 check_added_monitors!(nodes[1], 1);
9720 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9721 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9722 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9724 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
9726 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
9731 fn test_dup_htlc_second_fail_panic() {
9732 // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9733 // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9734 // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9735 // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
9736 do_test_dup_htlc_second_rejected(true);
9740 fn test_dup_htlc_second_rejected() {
// Test that if we receive a second HTLC for an MPP payment that overruns the payment amount, we
// simply reject the second HTLC but are still able to claim the first HTLC.
9743 do_test_dup_htlc_second_rejected(false);
9747 fn test_inconsistent_mpp_params() {
// Test that if we receive two HTLCs with different payment parameters, we fail back the first
9749 // such HTLC and allow the second to stay.
9750 let chanmon_cfgs = create_chanmon_cfgs(4);
9751 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9752 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9753 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9755 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9756 create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9757 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9760 let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9761 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
9762 let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
9763 assert_eq!(route.paths.len(), 2);
9764 route.paths.sort_by(|path_a, _| {
9765 // Sort the path so that the path through nodes[1] comes first
9766 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9767 core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9770 let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9772 let cur_height = nodes[0].best_block_info().1;
9773 let payment_id = PaymentId([42; 32]);
9775 let session_privs = {
9776 // We create a fake route here so that we start with three pending HTLCs, which we'll
9777 // ultimately have, just not right away.
9778 let mut dup_route = route.clone();
9779 dup_route.paths.push(route.paths[1].clone());
9780 nodes[0].node.test_add_new_pending_payment(our_payment_hash,
9781 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
9783 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
9784 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9785 &None, session_privs[0]).unwrap();
9786 check_added_monitors!(nodes[0], 1);
9789 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9790 assert_eq!(events.len(), 1);
9791 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9793 assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9795 nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9796 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9797 check_added_monitors!(nodes[0], 1);
9800 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9801 assert_eq!(events.len(), 1);
9802 let payment_event = SendEvent::from_event(events.pop().unwrap());
9804 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9805 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9807 expect_pending_htlcs_forwardable!(nodes[2]);
9808 check_added_monitors!(nodes[2], 1);
9810 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9811 assert_eq!(events.len(), 1);
9812 let payment_event = SendEvent::from_event(events.pop().unwrap());
9814 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9815 check_added_monitors!(nodes[3], 0);
9816 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9818 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9819 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9820 // post-payment_secrets) and fail back the new HTLC.
9822 expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9823 nodes[3].node.process_pending_htlc_forwards();
9824 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9825 nodes[3].node.process_pending_htlc_forwards();
9827 check_added_monitors!(nodes[3], 1);
9829 let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9830 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9831 commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9833 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9834 check_added_monitors!(nodes[2], 1);
9836 let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9837 nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9838 commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9840 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
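// `mpp_parts_remain()` asserts this surfaces only as a path failure: the payment as a whole
// isn't abandoned, since the part via nodes[1] is still pending and we retry the failed part
// below.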
9842 nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9843 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9844 &None, session_privs[2]).unwrap();
9845 check_added_monitors!(nodes[0], 1);
9847 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9848 assert_eq!(events.len(), 1);
9849 pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9851 do_claim_payment_along_route(
9852 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage)
9854 expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
9858 fn test_double_partial_claim() {
9859 // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9860 // time out, the sender resends only some of the MPP parts, then the user processes the
// PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
// amount.
9863 let chanmon_cfgs = create_chanmon_cfgs(4);
9864 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9865 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9866 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9868 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9869 create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9870 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9871 create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9873 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9874 assert_eq!(route.paths.len(), 2);
9875 route.paths.sort_by(|path_a, _| {
9876 // Sort the path so that the path through nodes[1] comes first
9877 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9878 core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9881 send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9882 // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9883 // amount of time to respond to.
9885 // Connect some blocks to time out the payment
9886 connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9887 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9889 let failed_destinations = vec![
9890 HTLCDestination::FailedPayment { payment_hash },
9891 HTLCDestination::FailedPayment { payment_hash },
9893 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9895 pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
// nodes[0] now retries one of the two paths...
9898 nodes[0].node.send_payment_with_route(&route, payment_hash,
9899 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9900 check_added_monitors!(nodes[0], 2);
9902 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9903 assert_eq!(events.len(), 2);
9904 let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9905 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9907 // At this point nodes[3] has received one half of the payment, and the user goes to handle
9908 // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9909 nodes[3].node.claim_funds(payment_preimage);
9910 check_added_monitors!(nodes[3], 0);
9911 assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
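// With only one of the two expected MPP parts present, nodes[3] refuses to claim rather than
// claiming a partial amount, hence no monitor update and no outbound messages here.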
9914 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9915 #[derive(Clone, Copy, PartialEq)]
9916 enum ExposureEvent {
/// Breach occurs at HTLC forwarding (see `send_htlc`)
AtHTLCForward,
/// Breach occurs at HTLC reception (see `update_add_htlc`)
AtHTLCReception,
/// Breach occurs at outbound update_fee (see `send_update_fee`)
9922 AtUpdateFeeOutbound,
9925 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
// Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat`.
//
// At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
// trimmed-to-dust HTLC outbound balance and this new payment as included on the next
// counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
// update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
// inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
// on the next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
// the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel
// might be available again for HTLC processing once the dust bandwidth has cleared up.
9938 let chanmon_cfgs = create_chanmon_cfgs(2);
9939 let mut config = test_default_channel_config();
// We hard-code the feerate values here, but they're re-calculated further down and asserted.
// If the values below ever change, these constants should simply be updated.
9943 const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
9944 let nondust_htlc_count_in_limit =
9945 if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
AT_FEE_OUTBOUND_HTLCS
} else { 0 };
9948 let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
9949 let expected_dust_buffer_feerate = initial_feerate + 2530;
9950 let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
commitment_tx_cost +=
	if on_holder_tx {
		htlc_success_tx_weight(&ChannelTypeFeatures::empty())
	} else {
		htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
	} * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
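// Note: when `apply_excess_fee` is false, `initial_feerate` is the default 253 sat/kW, so both
// terms above are computed with (initial_feerate - 253) == 0 and `commitment_tx_cost` stays 0,
// leaving the exposure limit configured below at exactly 5_000_000 msat.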
9958 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9959 *feerate_lock = initial_feerate;
9961 config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9962 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9963 // to get roughly the same initial value as the default setting when this test was
9964 // originally written.
9965 MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
9966 } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
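// `FixedLimitMsat` caps dust exposure at an absolute msat value, while `FeeRateMultiplier(m)`
// scales with the current fee estimate (roughly m * sat_per_kw, in msat) -- hence the division
// by the 253 sat/kW test feerate above so both variants land at roughly the same limit.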
9967 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9968 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9969 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9971 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
9972 let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9973 open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
9974 open_channel.common_fields.max_accepted_htlcs = 60;
if on_holder_tx {
	open_channel.common_fields.dust_limit_satoshis = 546;
}
9978 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9979 let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9980 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9982 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9984 let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9987 let mut node_0_per_peer_lock;
9988 let mut node_0_peer_state_lock;
9989 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
9990 ChannelPhase::UnfundedOutboundV1(chan) => {
9991 chan.context.holder_dust_limit_satoshis = 546;
9993 _ => panic!("Unexpected ChannelPhase variant"),
9997 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9998 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9999 check_added_monitors!(nodes[1], 1);
10000 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10002 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
10003 check_added_monitors!(nodes[0], 1);
10004 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
10006 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
10007 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
10008 update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
10011 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10012 *feerate_lock = 253;
10015 // Fetch a route in advance as we will be unable to once we're unable to send.
10016 let (mut route, payment_hash, _, payment_secret) =
10017 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
10019 let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
10020 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10021 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10022 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
10023 (chan.context().get_dust_buffer_feerate(None) as u64,
10024 chan.context().get_max_dust_htlc_exposure_msat(253))
10026 assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
10027 let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
10028 let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
// Subtract 3 sats in the multiplier case and 2 sats in the fixed-limit case to make sure we are 50% below the dust limit.
// This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
// while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
10033 let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10034 let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
10036 // This test was written with a fixed dust value here, which we retain, but assert that it is,
10037 // indeed, dust on both transactions.
10038 let dust_htlc_on_counterparty_tx: u64 = 4;
10039 let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
10040 let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10041 assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
10042 assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);
10045 if dust_outbound_balance {
10046 // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10047 // Outbound dust balance: 4372 sats
// Note, we need the sent payment to be above the 2132 sat outbound dust threshold on the counterparty_tx
10049 for _ in 0..dust_outbound_htlc_on_holder_tx {
10050 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
10051 nodes[0].node.send_payment_with_route(&route, payment_hash,
10052 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10055 // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10056 // Inbound dust balance: 4372 sats
// Note, we need the sent payment to be above the 2031 sat outbound dust threshold on the counterparty_tx
10058 for _ in 0..dust_inbound_htlc_on_holder_tx {
10059 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
10063 if dust_outbound_balance {
// Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10065 // Outbound dust balance: 5000 sats
10066 for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10067 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
10068 nodes[0].node.send_payment_with_route(&route, payment_hash,
10069 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
// Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10073 // Inbound dust balance: 5000 sats
10074 for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10075 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
10080 if exposure_breach_event == ExposureEvent::AtHTLCForward {
10081 route.paths[0].hops.last_mut().unwrap().fee_msat =
10082 if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
10083 // With default dust exposure: 5000 sats
10085 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10086 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10087 ), true, APIError::ChannelUnavailable { .. }, {});
10089 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10090 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10091 ), true, APIError::ChannelUnavailable { .. }, {});
10093 } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
10094 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
10095 nodes[1].node.send_payment_with_route(&route, payment_hash,
10096 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10097 check_added_monitors!(nodes[1], 1);
10098 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
10099 assert_eq!(events.len(), 1);
10100 let payment_event = SendEvent::from_event(events.remove(0));
10101 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
10102 // With default dust exposure: 5000 sats
10104 // Outbound dust balance: 6399 sats
10105 let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
10106 let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
10107 nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
10109 // Outbound dust balance: 5200 sats
10110 nodes[0].logger.assert_log("lightning::ln::channel",
10111 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
10112 dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
10113 max_dust_htlc_exposure_msat), 1);
10115 } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
10116 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
10117 // For the multiplier dust exposure limit, since it scales with feerate,
10118 // we need to add a lot of HTLCs that will become dust at the new feerate
10119 // to cross the threshold.
10120 for _ in 0..AT_FEE_OUTBOUND_HTLCS {
10121 let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
10122 nodes[0].node.send_payment_with_route(&route, payment_hash,
10123 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10126 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10127 *feerate_lock = *feerate_lock * 10;
10129 nodes[0].node.timer_tick_occurred();
10130 check_added_monitors!(nodes[0], 1);
10131 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
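// The 10x feerate bump would push our dust HTLC exposure over the configured limit on the
// updated commitment, so the update_fee is held back, as the log assertion above verifies.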
10134 let _ = nodes[0].node.get_and_clear_pending_msg_events();
10135 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
10136 added_monitors.clear();
10139 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
10140 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10141 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10142 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10143 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10144 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10145 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10146 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10147 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10148 if !multiplier_dust_limit && !apply_excess_fee {
10149 // Because non-dust HTLC transaction fees are included in the dust exposure, trying to
10150 // increase the fee to hit a higher dust exposure with a
10151 // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these
10152 // in the `multiplier_dust_limit` case.
10153 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10154 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10155 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10156 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10161 fn test_max_dust_htlc_exposure() {
10162 do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
10163 do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
10164 do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
10165 do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
10169 fn test_nondust_htlc_fees_are_dust() {
10170 // Test that the transaction fees paid in nondust HTLCs count towards our dust limit
10171 let chanmon_cfgs = create_chanmon_cfgs(3);
10172 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10174 let mut config = test_default_channel_config();
10175 // Set the dust limit to the default value
10176 config.channel_config.max_dust_htlc_exposure =
10177 MaxDustHTLCExposure::FeeRateMultiplier(10_000);
10178 // Make sure the HTLC limits don't get in the way
10179 config.channel_handshake_limits.min_max_accepted_htlcs = 400;
10180 config.channel_handshake_config.our_max_accepted_htlcs = 400;
10181 config.channel_handshake_config.our_htlc_minimum_msat = 1;
10183 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
10184 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10186 // Create a channel from 1 -> 0 but immediately push all of the funds towards 0
10187 let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
10188 while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
10189 send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
10192 // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
10193 // repeatedly until we run out of space.
10194 const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
10195 let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
10197 while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
10198 route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
10200 assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
10201 "We don't want to run out of ability to send because of some non-dust limit");
10202 assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
10203 "We should be able to fill our dust limit without too many HTLCs");
10205 let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
10206 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
assert_eq!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
10208 "Make sure we are able to send once we clear one HTLC");
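// `next_outbound_htlc_minimum_msat` becomes non-zero once any further dust HTLC would breach
// our dust exposure limit; from then on only HTLCs large enough to be non-dust can be sent,
// which is the state the loop above drove the channel into (and releasing one HTLC undoes).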
10210 // At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
10211 // exposure limit, and we want to max that out using non-dust HTLCs.
10212 let commitment_tx_per_htlc_cost =
10213 htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
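// (Illustrative: for non-anchor channels the HTLC-success transaction weighs roughly 703 WU,
// so at the 253 sat/kW test feerate each additional non-dust HTLC adds about 703 * 253 / 1000
// ~= 178 sats of second-stage fee, which is the per-HTLC cost, in msat, computed above.)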
10214 let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
10215 assert!(max_htlcs_remaining < 30,
10216 "We should be able to fill our dust limit without too many HTLCs");
10217 for i in 0..max_htlcs_remaining + 1 {
10218 assert_ne!(i, max_htlcs_remaining);
10219 if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
10220 // We found our limit, and it was less than max_htlcs_remaining!
10221 // At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
10222 // remaining dust exposure.
10225 route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
// At this point non-dust HTLCs are no longer accepted from node 0 -> 1; we also check that
// such HTLCs can't be routed over the same channel either.
10230 create_announced_chan_between_nodes(&nodes, 2, 0);
10231 let (route, payment_hash, _, payment_secret) =
10232 get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
10233 let onion = RecipientOnionFields::secret_only(payment_secret);
10234 nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
10235 check_added_monitors(&nodes[2], 1);
10236 let send = SendEvent::from_node(&nodes[2]);
10238 nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
10239 commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
10241 expect_pending_htlcs_forwardable!(nodes[0]);
10242 check_added_monitors(&nodes[0], 1);
10243 let node_id_1 = nodes[1].node.get_our_node_id();
10244 expect_htlc_handling_failed_destinations!(
10245 nodes[0].node.get_and_clear_pending_events(),
10246 &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
10249 let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
10250 nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
10251 commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
10252 expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
10257 fn test_non_final_funding_tx() {
10258 let chanmon_cfgs = create_chanmon_cfgs(2);
10259 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10260 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10261 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10263 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10264 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10265 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10266 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10267 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10269 let best_height = nodes[0].node.best_block.read().unwrap().height;
10271 let chan_id = *nodes[0].network_chan_count.borrow();
10272 let events = nodes[0].node.get_and_clear_pending_events();
10273 let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) };
10274 assert_eq!(events.len(), 1);
10275 let mut tx = match events[0] {
10276 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10277 // Timelock the transaction _beyond_ the best client height + 1.
10278 Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
10279 value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
10282 _ => panic!("Unexpected event"),
10284 // Transaction should fail as it's evaluated as non-final for propagation.
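// (LDK only allows a funding locktime up to one block past the current best height; anything
// further, like the +2 used here, is treated as non-final and rejected, while the
// "within_headroom" test below covers the accepted +1 case.)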
10285 match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
10286 Err(APIError::APIMisuseError { err }) => {
10287 assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
10291 let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
10292 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
10293 assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
10297 fn test_non_final_funding_tx_within_headroom() {
10298 let chanmon_cfgs = create_chanmon_cfgs(2);
10299 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10300 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10301 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10303 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10304 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10305 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10306 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10307 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10309 let best_height = nodes[0].node.best_block.read().unwrap().height;
10311 let chan_id = *nodes[0].network_chan_count.borrow();
10312 let events = nodes[0].node.get_and_clear_pending_events();
10313 let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) };
10314 assert_eq!(events.len(), 1);
10315 let mut tx = match events[0] {
10316 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10317 // Timelock the transaction within a +1 headroom from the best block.
10318 Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
10319 value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
10322 _ => panic!("Unexpected event"),
10325 // Transaction should be accepted if it's in a +1 headroom from best block.
10326 assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
10327 get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
10331 fn accept_busted_but_better_fee() {
10332 // If a peer sends us a fee update that is too low, but higher than our previous channel
10333 // feerate, we should accept it. In the future we may want to consider closing the channel
10334 // later, but for now we only accept the update.
10335 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10336 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10337 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10338 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10340 create_chan_between_nodes(&nodes[0], &nodes[1]);
10342 // Set nodes[1] to expect 5,000 sat/kW.
10344 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
10345 *feerate_lock = 5000;
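// With nodes[1] expecting 5,000 sat/kW, every feerate nodes[0] proposes below is "too low";
// what matters for acceptance is only whether each update moves the feerate up or down.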
// If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
10350 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10351 *feerate_lock = 1000;
10353 nodes[0].node.timer_tick_occurred();
10354 check_added_monitors!(nodes[0], 1);
10356 let events = nodes[0].node.get_and_clear_pending_msg_events();
10357 assert_eq!(events.len(), 1);
10359 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10360 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10361 commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10363 _ => panic!("Unexpected event"),
// If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept it.
10369 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10370 *feerate_lock = 2000;
10372 nodes[0].node.timer_tick_occurred();
10373 check_added_monitors!(nodes[0], 1);
10375 let events = nodes[0].node.get_and_clear_pending_msg_events();
10376 assert_eq!(events.len(), 1);
10378 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10379 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10380 commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10382 _ => panic!("Unexpected event"),
// However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the channel.
10388 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10389 *feerate_lock = 1000;
10391 nodes[0].node.timer_tick_occurred();
10392 check_added_monitors!(nodes[0], 1);
10394 let events = nodes[0].node.get_and_clear_pending_msg_events();
10395 assert_eq!(events.len(), 1);
10397 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
10398 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10399 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
10400 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
10401 [nodes[0].node.get_our_node_id()], 100000);
10402 check_closed_broadcast!(nodes[1], true);
10403 check_added_monitors!(nodes[1], 1);
10405 _ => panic!("Unexpected event"),
10409 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
10410 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10411 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10412 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10413 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10414 let min_final_cltv_expiry_delta = 120;
10415 let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
10416 min_final_cltv_expiry_delta - 2 };
10417 let recv_value = 100_000;
10419 create_chan_between_nodes(&nodes[0], &nodes[1]);
10421 let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
10422 let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
10423 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
10424 Some(recv_value), Some(min_final_cltv_expiry_delta));
10425 (payment_hash, payment_preimage, payment_secret)
10427 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
10428 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
10430 let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
10431 nodes[0].node.send_payment_with_route(&route, payment_hash,
10432 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10433 check_added_monitors!(nodes[0], 1);
10434 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
10435 assert_eq!(events.len(), 1);
10436 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
10437 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
10438 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
10439 expect_pending_htlcs_forwardable!(nodes[1]);
if valid_delta {
	expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
10443 None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
} else {
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
10449 check_added_monitors!(nodes[1], 1);
10451 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
10452 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
10453 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
	expect_payment_failed!(nodes[0], payment_hash, true);
}
10460 fn test_payment_with_custom_min_cltv_expiry_delta() {
10461 do_payment_with_custom_min_final_cltv_expiry(false, false);
10462 do_payment_with_custom_min_final_cltv_expiry(false, true);
10463 do_payment_with_custom_min_final_cltv_expiry(true, false);
10464 do_payment_with_custom_min_final_cltv_expiry(true, true);
10468 fn test_disconnects_peer_awaiting_response_ticks() {
// Tests that nodes which are awaiting a response critical for channel responsiveness
10470 // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10471 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10472 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10473 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10474 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10476 // Asserts a disconnect event is queued to the user.
10477 let check_disconnect_event = |node: &Node, should_disconnect: bool| {
10478 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
10479 if let MessageSendEvent::HandleError { action, .. } = event {
10480 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
10489 assert_eq!(disconnect_event.is_some(), should_disconnect);
10492 // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
10493 // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10494 let check_disconnect = |node: &Node| {
10495 // No disconnect without any timer ticks.
10496 check_disconnect_event(node, false);
10498 // No disconnect with 1 timer tick less than required.
10499 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
10500 node.node.timer_tick_occurred();
10501 check_disconnect_event(node, false);
10504 // Disconnect after reaching the required ticks.
10505 node.node.timer_tick_occurred();
10506 check_disconnect_event(node, true);
10508 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
10509 node.node.timer_tick_occurred();
10510 check_disconnect_event(node, true);
10513 create_chan_between_nodes(&nodes[0], &nodes[1]);
10515 // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
10516 *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
10517 nodes[0].node.timer_tick_occurred();
10518 check_added_monitors!(&nodes[0], 1);
10519 let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10520 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
10521 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
10522 check_added_monitors!(&nodes[1], 1);
10524 // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
10525 let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
10526 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
10527 check_added_monitors!(&nodes[0], 1);
10528 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
10529 check_added_monitors(&nodes[0], 1);
10531 // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
10532 // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
10533 // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10534 let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10535 check_disconnect(&nodes[1]);
10537 // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
10539 // Note that since the commitment dance didn't complete above, Alice is expected to resend her
10540 // final `RevokeAndACK` to Bob to complete it.
10541 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10542 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10543 let bob_init = msgs::Init {
10544 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10546 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
10547 let alice_init = msgs::Init {
10548 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10550 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
10552 // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
10553 // received Bob's yet, so she should disconnect him after reaching
10554 // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10555 let alice_channel_reestablish = get_event_msg!(
10556 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
10558 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
10559 check_disconnect(&nodes[0]);
10561 // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
10562 let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
10563 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
10564 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
10570 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
10572 // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
10573 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10574 nodes[0].node.timer_tick_occurred();
10575 check_disconnect_event(&nodes[0], false);
10578 // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
10579 // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10580 check_disconnect(&nodes[1]);
10582 // Finally, have Bob process the last message.
10583 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
10584 check_added_monitors(&nodes[1], 1);
10586 // At this point, neither node should attempt to disconnect each other, since they aren't
10587 // waiting on any messages.
10588 for node in &nodes {
10589 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10590 node.node.timer_tick_occurred();
10591 check_disconnect_event(node, false);
10597 fn test_remove_expired_outbound_unfunded_channels() {
10598 let chanmon_cfgs = create_chanmon_cfgs(2);
10599 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10600 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10601 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10603 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10604 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10605 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10606 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10607 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10609 let events = nodes[0].node.get_and_clear_pending_events();
10610 assert_eq!(events.len(), 1);
10612 Event::FundingGenerationReady { .. } => (),
10613 _ => panic!("Unexpected event"),
// Asserts the outbound channel has been removed from nodes[0]'s peer state map.
10617 let check_outbound_channel_existence = |should_exist: bool| {
10618 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10619 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10620 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10623 // Channel should exist without any timer ticks.
10624 check_outbound_channel_existence(true);
10626 // Channel should exist with 1 timer tick less than required.
10627 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10628 nodes[0].node.timer_tick_occurred();
10629 check_outbound_channel_existence(true)
10632 // Remove channel after reaching the required ticks.
10633 nodes[0].node.timer_tick_occurred();
10634 check_outbound_channel_existence(false);
10636 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10637 assert_eq!(msg_events.len(), 1);
10638 match msg_events[0] {
10639 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10640 assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10642 _ => panic!("Unexpected event"),
10644 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
10648 fn test_remove_expired_inbound_unfunded_channels() {
10649 let chanmon_cfgs = create_chanmon_cfgs(2);
10650 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10651 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10652 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10654 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10655 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10656 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10657 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10658 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10660 let events = nodes[0].node.get_and_clear_pending_events();
10661 assert_eq!(events.len(), 1);
10663 Event::FundingGenerationReady { .. } => (),
10664 _ => panic!("Unexpected event"),
// Asserts the inbound channel has been removed from nodes[1]'s peer state map.
10668 let check_inbound_channel_existence = |should_exist: bool| {
10669 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10670 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10671 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10674 // Channel should exist without any timer ticks.
10675 check_inbound_channel_existence(true);
10677 // Channel should exist with 1 timer tick less than required.
10678 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10679 nodes[1].node.timer_tick_occurred();
10680 check_inbound_channel_existence(true)
10683 // Remove channel after reaching the required ticks.
10684 nodes[1].node.timer_tick_occurred();
10685 check_inbound_channel_existence(false);
10687 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
10688 assert_eq!(msg_events.len(), 1);
10689 match msg_events[0] {
10690 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10691 assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10693 _ => panic!("Unexpected event"),
10695 check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
10699 fn test_channel_close_when_not_timely_accepted() {
10700 // Create network of two nodes
10701 let chanmon_cfgs = create_chanmon_cfgs(2);
10702 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10703 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10704 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10706 // Simulate peer-disconnects mid-handshake
10707 // The channel is initiated from the node 0 side,
10708 // but the nodes disconnect before node 1 could send accept channel
10709 let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10710 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10711 assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10713 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10714 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10716 // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
10717 assert_eq!(nodes[0].node.list_channels().len(), 1);
10719 // Since channel was inbound from node[1] perspective, it should have been dropped immediately.
10720 assert_eq!(nodes[1].node.list_channels().len(), 0);
10722 // In the meantime, some time passes.
10723 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
10724 nodes[0].node.timer_tick_occurred();
// Since we disconnected from the peer and did not reconnect within the allowed time,
// we should have force-closed the channel by now.
10729 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
10730 assert_eq!(nodes[0].node.list_channels().len(), 0);
// Since the accept_channel message was never received,
// the channel should have been force-closed by now from node 0's side
// and the peer removed from per_peer_state.
10736 let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10737 assert_eq!(node_0_per_peer_state.len(), 0);
10742 fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
10743 // Create network of two nodes
10744 let chanmon_cfgs = create_chanmon_cfgs(2);
10745 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10746 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10747 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10749 // Simulate peer-disconnects mid-handshake
10750 // The channel is initiated from the node 0 side,
10751 // but the nodes disconnect before node 1 could send accept channel
10752 let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10753 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10754 assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10756 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10757 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10759 // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
10760 assert_eq!(nodes[0].node.list_channels().len(), 1);
10762 // Since channel was inbound from node[1] perspective, it should have been immediately dropped.
10763 assert_eq!(nodes[1].node.list_channels().len(), 0);
10765 // The peers now reconnect
10766 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
10767 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10769 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
10770 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10771 }, false).unwrap();
10773 // Make sure the SendOpenChannel message is added to node_0 pending message events
10774 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10775 assert_eq!(msg_events.len(), 1);
10776 match &msg_events[0] {
10777 MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
10778 _ => panic!("Unexpected message."),
10782 fn do_test_multi_post_event_actions(do_reload: bool) {
10783 // Tests handling multiple post-Event actions at once.
10784 // There is specific code in ChannelManager to handle channels where multiple post-Event
10785 // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
10787 // Specifically, we test calling `get_and_clear_pending_events` while there are two
10788 // PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
10789 // - one from an RAA and one from an inbound commitment_signed.
10790 let chanmon_cfgs = create_chanmon_cfgs(3);
10791 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10792 let (persister, chain_monitor);
10793 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10794 let nodes_0_deserialized;
10795 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10797 let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
10798 let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
10800 send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10801 send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10803 let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10804 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10806 nodes[1].node.claim_funds(our_payment_preimage);
10807 check_added_monitors!(nodes[1], 1);
10808 expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
10810 nodes[2].node.claim_funds(payment_preimage_2);
10811 check_added_monitors!(nodes[2], 1);
10812 expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
10814 for dest in &[1, 2] {
10815 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
10816 nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
10817 commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
10818 check_added_monitors(&nodes[0], 0);
10819 }
10821 let (route, payment_hash_3, _, payment_secret_3) =
10822 get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
10823 let payment_id = PaymentId(payment_hash_3.0);
10824 nodes[1].node.send_payment_with_route(&route, payment_hash_3,
10825 RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
10826 check_added_monitors(&nodes[1], 1);
10828 let send_event = SendEvent::from_node(&nodes[1]);
10829 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
10830 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
10831 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
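// nodes[0] now owes nodes[1] an RAA + commitment_signed in response, but (as noted below) that
// response is held back until the pending post-event ChannelMonitorUpdates complete.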
10833 if do_reload {
10834 let nodes_0_serialized = nodes[0].node.encode();
10835 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
10836 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
10837 reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
10839 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10840 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10842 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
10843 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
10844 }
10846 let events = nodes[0].node.get_and_clear_pending_events();
10847 assert_eq!(events.len(), 4);
10848 if let Event::PaymentSent { payment_preimage, .. } = events[0] {
10849 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10850 } else { panic!(); }
10851 if let Event::PaymentSent { payment_preimage, .. } = events[1] {
10852 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10853 } else { panic!(); }
10854 if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
10855 if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
10857 // After the events are processed, the ChannelMonitorUpdates will be released and, upon their
10858 // completion, we'll respond to nodes[1] with an RAA + CS.
10859 get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10860 check_added_monitors(&nodes[0], 3);
10864 fn test_multi_post_event_actions() {
10865 do_test_multi_post_event_actions(true);
10866 do_test_multi_post_event_actions(false);
10870 fn test_batch_channel_open() {
10871 let chanmon_cfgs = create_chanmon_cfgs(3);
10872 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10873 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10874 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10876 // Initiate channel opening and create the batch channel funding transaction.
10877 let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10878 (&nodes[1], 100_000, 0, 42, None),
10879 (&nodes[2], 200_000, 0, 43, None),
10880 ]);
10882 // Go through the funding_created and funding_signed flow with node 1.
10883 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10884 check_added_monitors(&nodes[1], 1);
10885 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10887 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10888 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10889 check_added_monitors(&nodes[0], 1);
10891 // The transaction should not have been broadcast before all channels are ready.
10892 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
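// For batch funding, the funding transaction must not hit the chain until every channel in the
// batch has completed its funding_signed exchange; presumably this avoids committing funds to a
// channel whose counterparty never signed.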
10894 // Go through the funding_created and funding_signed flow with node 2.
10895 nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
10896 check_added_monitors(&nodes[2], 1);
10897 expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
10899 let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10900 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
10901 nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
10902 check_added_monitors(&nodes[0], 1);
10904 // The transaction should not have been broadcast before the persistence of all monitors has
10905 // completed.
10906 assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10907 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
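// While the monitor update for the second channel is still InProgress, neither the broadcast
// nor the ChannelPending events are released.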
10909 // Complete the persistence of the monitor.
10910 nodes[0].chain_monitor.complete_sole_pending_chan_update(
10911 &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
10912 );
10913 let events = nodes[0].node.get_and_clear_pending_events();
10915 // The transaction should only have been broadcast now.
10916 let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
10917 assert_eq!(broadcasted_txs.len(), 1);
10918 assert_eq!(broadcasted_txs[0], tx);
10920 assert_eq!(events.len(), 2);
10921 assert!(events.iter().any(|e| matches!(
10922 e,
10923 crate::events::Event::ChannelPending {
10924 ref counterparty_node_id,
10925 ..
10926 } if counterparty_node_id == &nodes[1].node.get_our_node_id(),
10927 )));
10928 assert!(events.iter().any(|e| matches!(
10929 e,
10930 crate::events::Event::ChannelPending {
10931 ref counterparty_node_id,
10932 ..
10933 } if counterparty_node_id == &nodes[2].node.get_our_node_id(),
10934 )));
10938 fn test_close_in_funding_batch() {
10939 // This test ensures that if one of the channels
10940 // in the batch closes, the entire batch is closed.
10941 let chanmon_cfgs = create_chanmon_cfgs(3);
10942 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10943 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10944 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10946 // Initiate channel opening and create the batch channel funding transaction.
10947 let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10948 (&nodes[1], 100_000, 0, 42, None),
10949 (&nodes[2], 200_000, 0, 43, None),
10950 ]);
10952 // Go through the funding_created and funding_signed flow with node 1.
10953 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10954 check_added_monitors(&nodes[1], 1);
10955 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10957 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10958 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10959 check_added_monitors(&nodes[0], 1);
10961 // The transaction should not have been broadcast before all channels are ready.
10962 assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10964 // Force-close the channel for which we've completed the initial monitor.
10965 let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
10966 let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
10967 let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
10968 let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
10970 nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
10972 // The monitor should become closed.
10973 check_added_monitors(&nodes[0], 1);
10975 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
10976 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
10977 assert_eq!(monitor_updates_1.len(), 1);
10978 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
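// CLOSED_CHANNEL_UPDATE_ID is the update_id used for the final ChannelMonitorUpdate written when
// a channel is closed, so its presence confirms the monitor has been told the channel is gone.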
10981 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10982 match msg_events[0] {
10983 MessageSendEvent::HandleError { .. } => (),
10984 _ => panic!("Unexpected message."),
10985 }
10987 // We broadcast the commitment transaction as part of the force-close.
10989 let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
10990 assert_eq!(broadcasted_txs.len(), 1);
10991 assert!(broadcasted_txs[0].txid() != tx.txid());
10992 assert_eq!(broadcasted_txs[0].input.len(), 1);
10993 assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
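// Note that the commitment transaction is broadcast even though the batch funding transaction
// itself never was: it spends the (still unbroadcast) funding output, as checked above.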
10996 // All channels in the batch should close immediately.
10997 check_closed_events(&nodes[0], &[
10998 ExpectedCloseEvent {
10999 channel_id: Some(channel_id_1),
11000 discard_funding: true,
11001 channel_funding_txo: Some(funding_txo_1),
11002 user_channel_id: Some(42),
11003 ..Default::default()
11004 },
11005 ExpectedCloseEvent {
11006 channel_id: Some(channel_id_2),
11007 discard_funding: true,
11008 channel_funding_txo: Some(funding_txo_2),
11009 user_channel_id: Some(43),
11010 ..Default::default()
11011 },
11012 ]);
11014 // Ensure the channels don't exist anymore.
11015 assert!(nodes[0].node.list_channels().is_empty());
11019 fn test_batch_funding_close_after_funding_signed() {
11020 let chanmon_cfgs = create_chanmon_cfgs(3);
11021 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
11022 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
11023 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
11025 // Initiate channel opening and create the batch channel funding transaction.
11026 let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
11027 (&nodes[1], 100_000, 0, 42, None),
11028 (&nodes[2], 200_000, 0, 43, None),
11029 ]);
11031 // Go through the funding_created and funding_signed flow with node 1.
11032 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
11033 check_added_monitors(&nodes[1], 1);
11034 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
11036 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
11037 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
11038 check_added_monitors(&nodes[0], 1);
11040 // Go through the funding_created and funding_signed flow with node 2.
11041 nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
11042 check_added_monitors(&nodes[2], 1);
11043 expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
11045 let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
11046 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
11047 nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
11048 check_added_monitors(&nodes[0], 1);
11050 // The transaction should not have been broadcast before all channels are ready.
11051 assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
11053 // Force-close the channel for which we've completed the initial monitor.
11054 let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
11055 let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
11056 let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
11057 let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
11058 nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
11059 check_added_monitors(&nodes[0], 2);
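// Two monitor updates are expected here: force-closing one channel in the batch also closes the
// other batch channel, so both monitors receive a closing update (verified just below).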
11061 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
11062 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
11063 assert_eq!(monitor_updates_1.len(), 1);
11064 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
11065 let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
11066 assert_eq!(monitor_updates_2.len(), 1);
11067 assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
11069 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
11070 match msg_events[0] {
11071 MessageSendEvent::HandleError { .. } => (),
11072 _ => panic!("Unexpected message."),
11073 }
11075 // We broadcast the commitment transaction as part of the force-close.
11077 let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
11078 assert_eq!(broadcasted_txs.len(), 1);
11079 assert!(broadcasted_txs[0].txid() != tx.txid());
11080 assert_eq!(broadcasted_txs[0].input.len(), 1);
11081 assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
11084 // All channels in the batch should close immediately.
11085 check_closed_events(&nodes[0], &[
11086 ExpectedCloseEvent {
11087 channel_id: Some(channel_id_1),
11088 discard_funding: true,
11089 channel_funding_txo: Some(funding_txo_1),
11090 user_channel_id: Some(42),
11091 ..Default::default()
11092 },
11093 ExpectedCloseEvent {
11094 channel_id: Some(channel_id_2),
11095 discard_funding: true,
11096 channel_funding_txo: Some(funding_txo_2),
11097 user_channel_id: Some(43),
11098 ..Default::default()
11099 },
11100 ]);
11102 // Ensure the channels don't exist anymore.
11103 assert!(nodes[0].node.list_channels().is_empty());
11106 fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
11107 // Tests that a node will forget the channel (when it only requires 1 confirmation) if the
11108 // funding and commitment transaction confirm in the same block.
11109 let chanmon_cfgs = create_chanmon_cfgs(2);
11110 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
11111 let mut min_depth_1_block_cfg = test_default_channel_config();
11112 min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
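// With minimum_depth set to 1, a single confirmation suffices for the channel to be considered
// funded, allowing the funding and commitment transactions to confirm in the same block below.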
11113 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
11114 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
11116 let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
11117 let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
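// Per BOLT 2, a v1 channel_id is the funding txid XORed with the funding output index, which is
// what v1_from_funding_outpoint computes here.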
11119 assert_eq!(nodes[0].node.list_channels().len(), 1);
11120 assert_eq!(nodes[1].node.list_channels().len(), 1);
11122 let (closing_node, other_node) = if confirm_remote_commitment {
11123 (&nodes[1], &nodes[0])
11124 } else {
11125 (&nodes[0], &nodes[1])
11126 };
11128 closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
11129 let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
11130 assert_eq!(msg_events.len(), 1);
11131 match msg_events.pop().unwrap() {
11132 MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
11133 _ => panic!("Unexpected event"),
11134 }
11135 check_added_monitors(closing_node, 1);
11136 check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);
11138 let commitment_tx = {
11139 let mut txn = closing_node.tx_broadcaster.txn_broadcast();
11140 assert_eq!(txn.len(), 1);
11141 let commitment_tx = txn.pop().unwrap();
11142 check_spends!(commitment_tx, funding_tx);
11143 commitment_tx
11144 };
11146 mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
11147 mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);
11149 check_closed_broadcast(other_node, 1, true);
11150 check_added_monitors(other_node, 1);
11151 check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);
11153 assert!(nodes[0].node.list_channels().is_empty());
11154 assert!(nodes[1].node.list_channels().is_empty());
11158 fn test_funding_and_commitment_tx_confirm_same_block() {
11159 do_test_funding_and_commitment_tx_confirm_same_block(false);
11160 do_test_funding_and_commitment_tx_confirm_same_block(true);
11164 fn test_accept_inbound_channel_errors_queued() {
11165 // For manually accepted inbound channels, tests that a close error is correctly handled
11166 // and the channel fails for the initiator.
11167 let mut config0 = test_default_channel_config();
11168 let mut config1 = config0.clone();
11169 config1.channel_handshake_limits.their_to_self_delay = 1000;
11170 config1.manually_accept_inbound_channels = true;
11171 config0.channel_handshake_config.our_to_self_delay = 2000;
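// Node 0 will ask for a to_self_delay of 2000 blocks, which exceeds node 1's
// their_to_self_delay limit of 1000, so node 1's attempt to accept the channel below is
// expected to fail.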
11173 let chanmon_cfgs = create_chanmon_cfgs(2);
11174 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
11175 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
11176 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
11178 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
11179 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
11181 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
11182 let events = nodes[1].node.get_and_clear_pending_events();
11183 match events[0] {
11184 Event::OpenChannelRequest { temporary_channel_id, .. } => {
11185 match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
11186 Err(APIError::ChannelUnavailable { err: _ }) => (),
11187 _ => panic!(),
11188 }
11189 }
11190 _ => panic!("Unexpected event"),
11191 }
11192 assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
11193 open_channel_msg.common_fields.temporary_channel_id);