// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that test standing up a network of ChannelManagers, creating channels, sending
//! payments/messages between them, and often checking the resulting ChannelMonitors are able to
//! claim outputs on-chain.
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils::{self, WatchtowerPersister};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::{Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};

use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;

#[test]
fn test_channel_resumption_fail_post_funding() {
	// If we fail to exchange funding with a peer prior to it disconnecting we'll resume the
	// channel open on reconnect, however if we do exchange funding we do not currently support
	// replaying it, so here we test that the channel closes.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap();
	let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan);
	let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan);

	let (temp_chan_id, tx, funding_output) =
		create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
	let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output);
	nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx).unwrap();

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]);
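
	// Since funding info was already exchanged, the channel cannot be resumed on reconnect and
	// is instead closed on disconnect with ClosureReason::DisconnectedPeer, as asserted above.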

	// After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that
	// explicitly here.
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
}

#[test]
fn test_insane_channel_opens() {
	// Stand up a network of 2 nodes
	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
	let mut cfg = UserConfig::default();
	cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Instantiate channel parameters where we push the maximum msats given our
	// funding satoshis
	let channel_value_sat = 31337; // same as funding satoshis
	let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
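	// Everything above the funder's required reserve can be pushed to the counterparty; the
	// push_msat mutation below verifies that even one msat more than this is rejected.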

	// Have node0 initiate a channel to node1 with aforementioned parameters
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();

	// Extract the channel open message from node0 to node1
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Test helper that asserts we get the correct error string given a mutator
	// that supposedly makes the channel open message insane
	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
			match action {
				&ErrorAction::SendErrorMessage { .. } => {
					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
				},
				_ => panic!("unexpected event!"),
			}
		} else { assert!(false); }
	};

	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

	// Test all mutations that would make the channel open message insane
	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1 ; msg });

	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });
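
	// BOLT 2 caps max_accepted_htlcs at 483, so 484 must be rejected: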
	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
	// them.
	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	*node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
		},
		_ => panic!()
	}
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
	// but only for them. Because some LSPs do it with some level of trust of the clients (for a
	// substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
	// in normal testing, we test it explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();

	// Have node0 initiate a channel to node1 with aforementioned parameters
	let mut push_amt = 100_000_000;
	let feerate_per_kw = 253;
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
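	// The funder must retain enough to pay commitment-tx fees (with headroom for several HTLCs)
	// plus its own reserve, so push_amt is reduced accordingly. As an illustrative figure only
	// (assuming the 724-weight base commitment and 172 weight/HTLC of static-remote-key
	// channels), the fee deduction is 253 * (724 + 4 * 172) / 1000 * 1000 = 357,000 msat.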

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
	let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	if !send_from_initiator {
		open_channel_message.channel_reserve_satoshis = 0;
		open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

	// Extract the channel accept message from node1 to node0
	let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	if send_from_initiator {
		accept_channel_message.channel_reserve_satoshis = 0;
		accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
	{
		let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
		let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
		let mut sender_node_per_peer_lock;
		let mut sender_node_peer_state_lock;

		let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
		match channel_phase {
			ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
				let chan_context = channel_phase.context_mut();
				chan_context.holder_selected_channel_reserve_satoshis = 0;
				chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
			},
			_ => assert!(false),
		}
	}

	let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
	let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
	create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

	// nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
	// security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
	if send_from_initiator {
		send_payment(&nodes[0], &[&nodes[1]], 100_000_000
			// Note that for outbound channels we have to consider the commitment tx fee and the
			// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
			// well as an additional HTLC.
			- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
	} else {
		send_payment(&nodes[1], &[&nodes[0]], push_amt);
	}
}

#[test]
fn test_counterparty_no_reserve() {
	do_test_counterparty_no_reserve(true);
	do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// A                                        B
	// update_fee                            ->
	// send (1) commitment_signed            -.
	//                                       <- update_add_htlc/commitment_signed
	// send (2) RAA (awaiting remote revoke) -.
	// (1) commitment_signed is delivered    ->
	//                                       .- send (3) RAA (awaiting remote revoke)
	// (2) RAA is delivered                  ->
	//                                       .- send (4) commitment_signed
	//                                       <- (3) RAA is delivered
	// send (5) commitment_signed            -.
	//                                       <- (4) commitment_signed is delivered
	// send (6) RAA                          -.
	// (5) commitment_signed is delivered    ->
	//                                       <- RAA
	// (6) RAA is delivered                  ->

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	// deliver (1), generating (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_update.update_add_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fee.is_none()); // (4)
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
	let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update.update_add_htlcs.is_empty()); // (5)
	assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
	assert!(as_update.update_fee.is_none()); // (5)
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// only (6) so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
	let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	check_added_monitors!(nodes[0], 1);

	let events_2 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
	// crash in an earlier version of the update_fee patch)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let update_msg = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
			update_fee.as_ref()
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
	check_added_monitors!(nodes[1], 1);

	// We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// A                                        B
	// update_fee/commitment_signed          ->
	//                                       .- send (1) RAA and (2) commitment_signed
	// update_fee (never committed)          ->
	// (3) update_fee                        ->
	// We have to manually generate the above update_fee, it is allowed by the protocol but we
	// don't track which updates correspond to which revoke_and_ack responses so we're in
	// AwaitingRAA mode and will not generate the update_fee yet.
	//                                       <- (1) RAA delivered
	// (3) is generated and send (4) CS      -.
	// Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
	// know the per_commitment_point to use for it.
	//                                       <- (2) commitment_signed delivered
	// revoke_and_ack                        ->
	// B should send no response here
	// (4) commitment_signed delivered       ->
	//                                       <- RAA/commitment_signed delivered
	// revoke_and_ack                        ->

	// First nodes[0] generates an update_fee
	let initial_feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		initial_feerate = *feerate_lock;
		*feerate_lock = initial_feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref().unwrap(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	// Deliver first update_fee/commitment_signed pair, generating (1) and (2):
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
	let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
	// transaction:
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = initial_feerate + 40;
	}
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Create the (3) update_fee message that nodes[0] will generate before it does...
	let mut update_msg_2 = msgs::UpdateFee {
		channel_id: update_msg_1.channel_id.clone(),
		feerate_per_kw: (initial_feerate + 30) as u32,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	// Deliver (1), generating (3) and (4)
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
	let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	assert!(as_second_update.update_add_htlcs.is_empty());
	assert!(as_second_update.update_fulfill_htlcs.is_empty());
	assert!(as_second_update.update_fail_htlcs.is_empty());
	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
	// Check that the update_fee newly generated matches what we delivered:
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

	// Deliver (2) commitment_signed
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
	// Previously, we had issues deserializing channels when we hadn't connected the first block
	// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
	// serialization round-trips and simply do steps towards opening a channel and then drop the
	// Node objects.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
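
	// The low nibble of `steps` selects how far through the funding flow we get before dropping
	// the nodes, while the high bit optionally connects an initial block first.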

	if steps & 0b1000_0000 != 0 {
		let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	if steps & 0x0f == 0 { return; }
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 1 { return; }
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 2 { return; }
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	if steps & 0x0f == 3 { return; }
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 4 { return; }
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 5 { return; }
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	if steps & 0x0f == 6 { return; }
	create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

	if steps & 0x0f == 7 { return; }
	confirm_transaction_at(&nodes[0], &tx, 2);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
	do_test_sanity_on_in_flight_opens(0);
	do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(1);
	do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(2);
	do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(3);
	do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(4);
	do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(5);
	do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(6);
	do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(7);
	do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(8);
	do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
}

#[test]
fn test_update_fee_vanilla() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 25;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_value = 5000;
	let push_sats = 700;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
	let channel_id = chan.2;
	let secp_ctx = Secp256k1::new();
	let default_config = UserConfig::default();
	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Calculate the maximum feerate that A can afford. Note that A won't send an update_fee that
	// leaves it unable to pay for CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs on top of the
	// commitment tx fee, while B only requires that the bare commitment tx be affordable, so we
	// calculate two different feerates here - the expected local limit as well as the expected
	// remote limit.
	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
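	// I.e. max feerate (sat/kW) = spendable balance in sat * 1000 / commitment weight, with the
	// local check padding the weight by CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs' worth.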
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
	{
		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
		assert_eq!(commitment_tx.output.len(), 2);
		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
		actual_fee = channel_value - actual_fee;
		assert_eq!(total_fee, actual_fee);
	}

	{
		// Increment the feerate by a small constant, accounting for rounding errors
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 4;
	}
	nodes[0].node.timer_tick_occurred();
	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
	check_added_monitors!(nodes[0], 0);
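
	// Commitment transaction numbers count down from 2^48 - 1 (see BOLT 3); 281474976710654 is
	// 2^48 - 2.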
	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 pubkeys.funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 pubkeys.funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			INITIAL_COMMITMENT_NUMBER - 1,
			push_sats,
			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			non_buffer_feerate + 4,
			&mut htlcs,
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
	};
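
	// Note: nodes[0] itself refuses to send an update_fee this high (see the "Cannot afford"
	// log assertion above), so we hand-construct and sign the over-feerate commitment with the
	// channel's signer to exercise nodes[1]'s affordability check directly.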

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	let update_fee = msgs::UpdateFee {
		channel_id: chan.2,
		feerate_per_kw: non_buffer_feerate + 4,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

	// While producing the commitment_signed response after handling a received update_fee
	// request, the check that the funder (who sent the update_fee request) can afford the new
	// fee (funder_balance >= fee + channel_reserve) should produce an error.
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
		[nodes[0].node.get_our_node_id()], channel_value);
}

#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);

	// nothing happens since node[1] is in AwaitingRemoteRevoke
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 0);
		added_monitors.clear();
	}
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// node[1] has nothing to do

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	// AwaitingRemoteRevoke ends here

	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(commitment_update.update_add_htlcs.len(), 1);
	assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fee.is_none(), true);

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[0]);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { .. } => { },
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);

	send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let channel_id = chan.2;

	// A                                        B
	// (1) update_fee/commitment_signed      ->
	//                                       <- (2) revoke_and_ack
	//                                       .- send (3) commitment_signed
	// (4) update_fee/commitment_signed      ->
	//                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
	//                                       <- (3) commitment_signed delivered
	// send (6) revoke_and_ack               -.
	//                                       <- (5) deliver revoke_and_ack
	// (6) deliver revoke_and_ack            ->
	//                                       .- send (7) commitment_signed in response to (4)
	//                                       <- (7) deliver commitment_signed
	// revoke_and_ack                        ->

	// Create and deliver (1)...
	let feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		feerate = *feerate_lock;
		*feerate_lock = feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// Generate (2) and (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// Deliver (2):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Create and deliver (4)...
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate + 30;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Handle (3), creating (6):
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Deliver (6), creating (7):
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(commitment_update.update_add_htlcs.is_empty());
	assert!(commitment_update.update_fulfill_htlcs.is_empty());
	assert!(commitment_update.update_fail_htlcs.is_empty());
	assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
	assert!(commitment_update.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
	assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn fake_network_test() {
	// Simple test which builds a network of ChannelManagers, connects them to each other, and
	// tests that payments get routed and transactions broadcast in semi-reasonable ways.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);

	// Rebalance the network a bit by relaying one payment through all the channels...
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);

	// Send some more payments
	send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);

	// Test failure packets
	let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);

	// Add a new channel that skips 3
	let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);

	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
	send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);

	// Do some rebalance loop payments, simultaneously
	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
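	// Each intermediate hop charges fee_base_msat plus fee_proportional_millionths per million
	// msat forwarded, computed here over the amount owed at the following hop.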
	let payment_preimage_1 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;

	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
	let payment_hash_2 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;

	// Claim the rebalances...
	fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
	claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);

	// Close down the channels...
	close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
fn holding_cell_htlc_counting() {
	// Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
	// to ensure we don't end up with HTLCs sitting around in our holding cell for several
	// commitment dance rounds.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Fetch a route in advance as we will be unable to once we're unable to send.
	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);

	let mut payments = Vec::new();
	for _ in 0..50 {
		let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
		nodes[1].node.send_payment_with_route(&route, payment_hash,
			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
		payments.push((payment_preimage, payment_hash));
	}
	check_added_monitors!(nodes[1], 1);
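	// Only one monitor update is expected here: the first send went out as a real
	// update_add_htlc, while the remaining sends queued in the holding cell awaiting the
	// counterparty's revoke_and_ack, so they touch no commitment state yet.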

	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());

	// There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
	// the holding cell waiting on B's RAA to send. At this point we should not be able to add
	// another HTLC.
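	// (At the time of writing OUR_MAX_HTLCS is 50, matching the 50 sends above; the limit
	// deliberately counts holding-cell HTLCs as pending, which is what this test exercises.)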
	unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
		), true, APIError::ChannelUnavailable { .. }, {});
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// This should also be true if we try to forward a payment.
	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	// We have to forward pending HTLCs twice - the first attempt tries to forward the payment
	// (and fails), the second processes the resulting failure and fails the HTLC backward.
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	check_added_monitors!(nodes[1], 1);

	let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);

	expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);

	// Now forward all the pending HTLCs and claim them back
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
	check_added_monitors!(nodes[2], 1);

	let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	check_added_monitors!(nodes[1], 1);
	let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	for ref update in as_updates.update_add_htlcs.iter() {
		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
	}
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
	check_added_monitors!(nodes[2], 1);
	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[2], 1);
	let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
	check_added_monitors!(nodes[2], 1);

	expect_pending_htlcs_forwardable!(nodes[2]);

	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), payments.len());
	for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
		match event {
			&Event::PaymentClaimable { ref payment_hash, .. } => {
				assert_eq!(*payment_hash, *hash);
			},
			_ => panic!("Unexpected event"),
		};
	}

	for (preimage, _) in payments.drain(..) {
		claim_payment(&nodes[1], &[&nodes[2]], preimage);
	}

	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
}

#[test]
fn duplicate_htlc_test() {
	// Test that we accept duplicate payment_hash HTLCs across the network and that
	// claiming/failing them are all separate and don't affect each other
	let chanmon_cfgs = create_chanmon_cfgs(6);
	let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
	let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);

	// Create some initial channels to route via 3 to 4/5 from 0/1/2
	create_announced_chan_between_nodes(&nodes, 0, 3);
	create_announced_chan_between_nodes(&nodes, 1, 3);
	create_announced_chan_between_nodes(&nodes, 2, 3);
	create_announced_chan_between_nodes(&nodes, 3, 4);
	create_announced_chan_between_nodes(&nodes, 3, 5);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);

	*nodes[0].network_payment_count.borrow_mut() -= 1;
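	// Rewinding the shared payment counter makes the test helpers hand out the same
	// deterministic preimage (and thus the same payment_hash) again, letting us route
	// duplicate-hash HTLCs along different paths.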
	assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);

	*nodes[0].network_payment_count.borrow_mut() -= 1;
	assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);

	claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
	fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
	claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
}

#[test]
fn test_duplicate_htlc_different_direction_onchain() {
	// Test that ChannelMonitor doesn't generate 2 preimage txn
	// when we have 2 HTLCs with same preimage that go across a node
	// in opposite directions, even with the same payment secret.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);

	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
	let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
	send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);

	// Provide preimage to node 0 by claiming payment
	nodes[0].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[0], payment_hash, 800_000);
	check_added_monitors!(nodes[0], 1);

	// Broadcast node 1 commitment txn
	let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);

	assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
	let mut has_both_htlcs = 0; // check htlcs match ones committed
	for outp in remote_txn[0].output.iter() {
		if outp.value == 800_000 / 1000 {
			has_both_htlcs += 1;
		} else if outp.value == 900_000 / 1000 {
			has_both_htlcs += 1;
		}
	}
	assert_eq!(has_both_htlcs, 2);
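	// (Transaction output values are denominated in satoshis while HTLC amounts are in
	// millisatoshis, hence the division by 1000 when matching the 900_000/800_000 msat HTLCs.)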

	mine_transaction(&nodes[0], &remote_txn[0]);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires

	let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(claim_txn.len(), 3);

	check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
	check_spends!(claim_txn[1], remote_txn[0]);
	check_spends!(claim_txn[2], remote_txn[0]);
	let preimage_tx = &claim_txn[0];
	let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
		(&claim_txn[1], &claim_txn[2])
	} else {
		(&claim_txn[2], &claim_txn[1])
	};

	assert_eq!(preimage_tx.input.len(), 1);
	assert_eq!(preimage_bump_tx.input.len(), 1);

	assert_eq!(preimage_tx.input.len(), 1);
	assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
	assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);

	assert_eq!(timeout_tx.input.len(), 1);
	assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
	check_spends!(timeout_tx, remote_txn[0]);
	assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 3);
	for e in events {
		match e {
			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
			MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
				assert_eq!(node_id, nodes[1].node.get_our_node_id());
				assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
			},
			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_malformed_htlcs.is_empty());
				assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

#[test]
fn test_basic_channel_reserve() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let channel_reserve = chan_stat.channel_reserve_msat;

	// The 2* and +1 are for the fee spike reserve.
	let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
	let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
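	// nodes[0] funded 100_000 sats and pushed 95_000_000 msat to nodes[1], so its local
	// balance is 100_000 * 1_000 - 95_000_000 = 5_000_000 msat; what it can actually send is
	// that balance less the reserve it must maintain and the doubled (fee-spike-buffer)
	// commitment fee for one additional HTLC.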
	let (mut route, our_payment_hash, _, our_payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
	route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
	let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
	match err {
		PaymentSendFailure::AllFailedResendSafe(ref fails) => {
			if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
			else { panic!("Unexpected error variant"); }
		},
		_ => panic!("Unexpected error variant"),
	}
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
}

#[test]
fn test_fee_spike_violation_fails_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let (mut route, payment_hash, _, payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
	route.paths[0].hops[0].fee_msat += 1;
	// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");

	let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;

	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
	let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
		3460001, &recipient_onion_fields, cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: 0,
		amount_msat: htlc_msat,
		payment_hash: payment_hash,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
		blinding_point: None,
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);

	// Now manually create the commitment_signed message corresponding to the update_add
	// nodes[0] just sent. In the code for construction of this message, "local" refers
	// to the sender of the message, and "remote" refers to the receiver.

	let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);

	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
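	// Per BOLT #3, commitment numbers start at 2^48 - 1 and count *down* with each new
	// commitment transaction, which is why the first commitment is (1 << 48) - 1 and later
	// states are reached by subtracting from it below.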

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		// Make the signer believe we validated another commitment, so we can release the secret
		chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;

		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
		 chan_signer.as_ref().pubkeys().funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 chan_signer.as_ref().pubkeys().funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	// Build the remote commitment transaction so we can sign it, and then later use the
	// signature for the commitment_signed message.
	let local_chan_balance = 1313;

	let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
		offered: false,
		amount_msat: 3460001,
		cltv_expiry: htlc_cltv,
		payment_hash,
		transaction_output_index: Some(1),
	};

	let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			commitment_number,
			95000,
			local_chan_balance,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			feerate_per_kw,
			&mut vec![(accepted_htlc_info, ())],
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
	};
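	// `res` holds the output of sign_counterparty_commitment: the signature for the
	// commitment transaction itself plus one signature per HTLC, which is exactly what the
	// commitment_signed message below carries.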

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	// Send the commitment_signed message to the nodes[1].
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	let _ = nodes[1].node.get_and_clear_pending_msg_events();

	// Send the RAA to nodes[1].
	let raa_msg = msgs::RevokeAndACK {
		channel_id: chan.2,
		per_commitment_secret: local_secret,
		next_per_commitment_point: next_local_point,
		#[cfg(taproot)]
		next_local_nonce: None,
	};
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	// Make sure the HTLC failed in the way we expect.
	match events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
			assert_eq!(update_fail_htlcs.len(), 1);
			update_fail_htlcs[0].clone()
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].logger.assert_log("lightning::ln::channel",
		format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);

	check_added_monitors!(nodes[1], 2);
}

#[test]
fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	// Set the fee rate for the channel very high, to the point where the fundee
	// sending any above-dust amount would result in a channel reserve violation.
	// In this test we check that we would be prevented from sending an HTLC in
	// this situation.
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);

	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
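	// Starting from the full 100M msat of a 100k-sat channel, the funder retains only the
	// commitment fee it needs for MIN_AFFORDABLE_HTLC_COUNT HTLCs plus the reserve required
	// of it; everything else is pushed to nodes[1], leaving the fundee right at the edge of
	// what the reserve rules allow it to send back.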

	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);

	// Fetch a route in advance as we will be unable to once we're unable to send.
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
	// Sending exactly enough to hit the reserve amount should be accepted
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	// However one more HTLC should be significantly over the reserve amount and fail.
	unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
	), true, APIError::ChannelUnavailable { .. }, {});
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}

#[test]
fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
	// channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
	// transaction fee with 0 HTLCs (183 sats)).
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);

	// Send four HTLCs to cover the initial push_msat buffer we're required to include
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	let (mut route, payment_hash, _, payment_secret) =
		get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
	route.paths[0].hops[0].fee_msat = 700_000;
	// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
	let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
	let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
		700_000, &recipient_onion_fields, cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
		amount_msat: htlc_msat,
		payment_hash: payment_hash,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
		blinding_point: None,
	};

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
		[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
	// Test that if we receive many dust HTLCs over an outbound channel, they don't count when
	// calculating our commitment transaction fee (this was previously broken).
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();

	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
	// channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
	// transaction fee with 0 HTLCs (183 sats)).
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);

	let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
		+ feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
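	// An inbound HTLC is "dust" (omitted from the commitment tx) when its value can't cover
	// the dust limit plus the fee of the HTLC-success transaction that would claim it, so the
	// largest dust amount is dust_limit + success-tx fee (rounded to msat) minus one msat.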
	// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
	// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
	// commitment transaction fee.
	route_payment(&nodes[1], &[&nodes[0]], dust_amt);

	// Send four HTLCs to cover the initial push_msat buffer we're required to include
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	// One more than the dust amt should fail, however.
	let (mut route, our_payment_hash, _, our_payment_secret) =
		get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
	route.paths[0].hops[0].fee_msat += 1;
	unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
	), true, APIError::ChannelUnavailable { .. }, {});
}

#[test]
fn test_chan_init_feerate_unaffordability() {
	// Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
	// channel reserve and feerate requirements.
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
	// HTLC to the commitment transaction.
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
		APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });

	// During open, we don't have a "counterparty channel reserve" to check against, so that
	// requirement only comes into play on the open_channel handling side.
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	open_channel_msg.push_msat += 1;
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);

	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
			assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
		},
		_ => panic!("Unexpected event"),
	}
}

#[test]
fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
	// Test that if we receive many dust HTLCs over an inbound channel, they don't count when
	// calculating our counterparty's commitment transaction fee (this was previously broken).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);

	let payment_amt = 46000; // Dust amount
	// In the previous code, these first four payments would succeed.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);

	// Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);

	// And this last payment previously resulted in nodes[1] closing on its inbound-channel
	// counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
	// transaction fee and therefore perceived this next payment as a channel reserve violation.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
}

#[test]
fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);

	let feemsat = 239;
	let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
	let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);

	// Add a 2* and +1 for the fee spike reserve.
	let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
	let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
	let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
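	// recv_value_1 is half of what nodes[0] could send while still honoring its reserve, the
	// routing fee, and the doubled fee-spike-buffer commitment fee for two extra HTLCs; the
	// second half (plus one msat, below) is what pushes the channel over the edge.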

	// Add a pending HTLC.
	let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
	let payment_event_1 = {
		nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);

	// Attempt to trigger a channel reserve violation --> payment failure.
	let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
	let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
	let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
	let mut route_2 = route_1.clone();
	route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;

	// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
	let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
	let recipient_onion_fields = RecipientOnionFields::spontaneous_empty();
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
		&route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: 1,
		amount_msat: htlc_msat + 1,
		payment_hash: our_payment_hash_1,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
		blinding_point: None,
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
		[nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_inbound_outbound_capacity_is_not_zero() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let channels0 = node_chanmgrs[0].list_channels();
	let channels1 = node_chanmgrs[1].list_channels();
	let default_config = UserConfig::default();
	assert_eq!(channels0.len(), 1);
	assert_eq!(channels1.len(), 1);

	let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
	assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
	assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);

	assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
	assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
}

fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
	(commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
}
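
// As a worked example of the helper above (a sketch, assuming the usual test constants):
// for `only_static_remote_key` channels the commitment base weight is 724 and each non-dust
// HTLC adds 172 weight units, so at the test fee estimator's default of 253 sat/kW one HTLC
// costs (724 + 172) * 253 / 1000 = 226 sats, i.e. 226_000 msat. The trailing
// `/ 1000 * 1000` rounds the msat result down to a whole satoshi.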
#[test]
fn test_channel_reserve_holding_cell_htlcs() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	// When this test was written, the default base fee floated based on the HTLC count.
	// It is now fixed, so we simply set the fee to the expected value here.
	let mut config = test_default_channel_config();
	config.channel_config.forwarding_fee_base_msat = 239;
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);

	let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
	let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);

	let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
	let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);

	macro_rules! expect_forward {
		($node: expr) => {{
			let mut events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			check_added_monitors!($node, 1);
			let payment_event = SendEvent::from_event(events.remove(0));
			payment_event
		}}
	}

	let feemsat = 239; // set above
	let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
	let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);

	let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;

	// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
	{
		let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
			.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
		let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
		route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
		assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));

		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	// channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
	// nodes[0]'s wealth
	loop {
		let amt_msat = recv_value_0 + total_fee_msat;
		// 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
		// Also, ensure that each payment has enough to be over the dust limit to
		// ensure it'll be included in each commit tx fee calculation.
		let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
		let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
		if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
			break;
		}

		let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
			.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
		let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
		let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
		claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);

		let (stat01_, stat11_, stat12_, stat22_) = (
			get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
			get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
			get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
			get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
		);

		assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
		assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
		assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
		assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
		stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
	}

	// adding pending output.
	// 2* and +1 HTLCs on the commit tx fee for the fee spike reserve.
	// The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
	// after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
	// divide this quantity into 3 portions, that will each be sent in an HTLC. This allows us
	// to test channel reserve policy at the edges of what amount is sendable, i.e.
	// cases where 1 msat over X amount will cause a payment failure, but anything less than
	// that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
	// the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
	// is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
	// calculations.
	let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
	let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
	let amt_msat_1 = recv_value_1 + total_fee_msat;

	let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
	let payment_event_1 = {
		nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);

	// channel reserve test with htlc pending output > 0
	let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
	{
		let mut route = route_1.clone();
		route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
		let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	// split the rest to test holding cell
	let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
	let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
	let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
	let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
	{
		let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
		assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
	}
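	// Sanity of the split above: recv_value_21 + recv_value_22 + total_fee_msat +
	// additional_htlc_cost_msat adds back up to recv_value_2 exactly, so once both HTLCs
	// are in flight nodes[0] sits precisely at its reserve (which the assert checks).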

	// now see if they go through on both sides
	let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
	// but this one will get stuck in the holding cell
	nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
		RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
	check_added_monitors!(nodes[0], 0);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 0);

	// test with outbound holding cell amount > 0
	{
		let (mut route, our_payment_hash, _, our_payment_secret) =
			get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
		route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
	// this one will also get stuck in the holding cell
	nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
		RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
	check_added_monitors!(nodes[0], 0);
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// flush the pending htlc
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
	let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// the pending htlc should be promoted to committed
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
	check_added_monitors!(nodes[0], 1);
	let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let ref payment_event_11 = expect_forward!(nodes[1]);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
	commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[2]);
	expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);

	// flush the htlcs in the holding cell
	assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
	commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let ref payment_event_3 = expect_forward!(nodes[1]);
	assert_eq!(payment_event_3.msgs.len(), 2);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);

	commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[2]);

	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(our_payment_hash_21, *payment_hash);
			assert_eq!(recv_value_21, amount_msat);
			assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
			assert_eq!(via_channel_id, Some(chan_2.2));
			match &purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret_21, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(our_payment_hash_22, *payment_hash);
			assert_eq!(recv_value_22, amount_msat);
			assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
			assert_eq!(via_channel_id, Some(chan_2.2));
			match &purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret_22, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);

	let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
	let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
	send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);

	let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
	let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
	let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
	assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
	assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);

	let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
	assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
}

#[test]
fn channel_reserve_in_flight_removes() {
	// In cases where one side claims an HTLC, it thinks it has additional available funds that it
	// can send to its counterparty, but due to update ordering, the other side may not yet have
	// considered those HTLCs fully removed.
	// This tests that we don't count HTLCs which will not be included in the next remote
	// commitment transaction towards the reserve value (as it implies no commitment transaction
	// will be generated which violates the remote reserve value).
	// This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
	// To test this we:
	//  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
	//    you consider the values of both of these HTLCs, B may be able to send an HTLC back to A,
	//    but if you only consider the value of the first HTLC, it may not),
	//  * start routing a third HTLC from A to B,
	//  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
	//    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
	//  * deliver the first fulfill from B,
	//  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
	//    claim,
	//  * deliver A's response CS and RAA.
	//    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
	//    removed it fully. B now has the push_msat plus the first two HTLCs in value.
	//  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
	//    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
	// Route the first two HTLCs.
	let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
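	// payment_value_1 is sized so that after receiving it B's balance sits 10_000 msat *below*
	// its required reserve; only once the second (20_000 msat) HTLC is also claimed does B
	// clear the reserve and have anything it can spend back towards A.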

	// Start routing the third HTLC (this is just used to get everyone in the right state).
	let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
	let send_1 = {
		nodes[0].node.send_payment_with_route(&route, payment_hash_3,
			RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};

	// Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
	// initial fulfill/CS.
	nodes[1].node.claim_funds(payment_preimage_1);
	expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
	check_added_monitors!(nodes[1], 1);
	let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	// This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
	// remove the second HTLC when we send the HTLC back from B to A.
	nodes[1].node.claim_funds(payment_preimage_2);
	expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2188 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2189 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2190 check_added_monitors!(nodes[1], 1);
2191 // B is already AwaitingRAA, so can't generate a CS here
2192 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2194 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2195 check_added_monitors!(nodes[1], 1);
2196 let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2198 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2199 check_added_monitors!(nodes[0], 1);
2200 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2202 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2203 check_added_monitors!(nodes[1], 1);
2204 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2206 // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2207 // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2208 // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2209 // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2210 // on-chain as necessary).
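// (Put differently: an HTLC only counts as fully resolved for a node once it can no
// longer appear in any commitment transaction that node might still broadcast.)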
2211 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2212 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2213 check_added_monitors!(nodes[0], 1);
2214 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2215 expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
2217 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2218 check_added_monitors!(nodes[1], 1);
2219 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2221 expect_pending_htlcs_forwardable!(nodes[1]);
2222 expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2224 // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2225 // resolve the second HTLC from A's point of view.
2226 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2227 check_added_monitors!(nodes[0], 1);
2228 expect_payment_path_successful!(nodes[0]);
2229 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2231 // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2232 // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2233 let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2234 let send_2 = {
2235 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2236 RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2237 check_added_monitors!(nodes[1], 1);
2238 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2239 assert_eq!(events.len(), 1);
2240 SendEvent::from_event(events.remove(0))
2241 };
2243 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2244 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2245 check_added_monitors!(nodes[0], 1);
2246 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2248 // Now just resolve all the outstanding messages/HTLCs for completeness...
2250 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2251 check_added_monitors!(nodes[1], 1);
2252 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2254 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2255 check_added_monitors!(nodes[1], 1);
2257 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2258 check_added_monitors!(nodes[0], 1);
2259 expect_payment_path_successful!(nodes[0]);
2260 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2262 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2263 check_added_monitors!(nodes[1], 1);
2264 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2266 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2267 check_added_monitors!(nodes[0], 1);
2269 expect_pending_htlcs_forwardable!(nodes[0]);
2270 expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2272 claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2273 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2277 fn channel_monitor_network_test() {
2278 // Simple test which builds a network of ChannelManagers, connects them to each other, and
2279 // tests that ChannelMonitor is able to recover from various states.
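// The topology below is a simple line, with each hop force-closed in turn:
//     nodes[0] -- chan_1 -- nodes[1] -- chan_2 -- nodes[2] -- chan_3 -- nodes[3] -- chan_4 -- nodes[4]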
2280 let chanmon_cfgs = create_chanmon_cfgs(5);
2281 let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2282 let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2283 let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2285 // Create some initial channels
2286 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2287 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2288 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2289 let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2291 // Make sure all nodes are at the same starting height
2292 connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2293 connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2294 connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2295 connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2296 connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2298 // Rebalance the network a bit by relaying one payment through all the channels...
2299 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2300 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2301 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2302 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2304 // Simple case with no pending HTLCs:
2305 nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2306 check_added_monitors!(nodes[1], 1);
2307 check_closed_broadcast!(nodes[1], true);
2308 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
2310 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2311 assert_eq!(node_txn.len(), 1);
2312 mine_transaction(&nodes[1], &node_txn[0]);
2313 if nodes[1].connect_style.borrow().updates_best_block_first() {
2314 let _ = nodes[1].tx_broadcaster.txn_broadcast();
2315 }
2317 mine_transaction(&nodes[0], &node_txn[0]);
2318 check_added_monitors!(nodes[0], 1);
2319 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2321 check_closed_broadcast!(nodes[0], true);
2322 assert_eq!(nodes[0].node.list_channels().len(), 0);
2323 assert_eq!(nodes[1].node.list_channels().len(), 1);
2324 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2326 // One pending HTLC is discarded by the force-close:
2327 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2329 // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2330 // broadcasted until we reach the timelock time).
2331 nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2332 check_closed_broadcast!(nodes[1], true);
2333 check_added_monitors!(nodes[1], 1);
2335 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2336 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2337 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2338 mine_transaction(&nodes[2], &node_txn[0]);
2339 check_added_monitors!(nodes[2], 1);
2340 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2342 check_closed_broadcast!(nodes[2], true);
2343 assert_eq!(nodes[1].node.list_channels().len(), 0);
2344 assert_eq!(nodes[2].node.list_channels().len(), 1);
2345 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
2346 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2348 macro_rules! claim_funds {
2349 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2350 {
2351 $node.node.claim_funds($preimage);
2352 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2353 check_added_monitors!($node, 1);
2355 let events = $node.node.get_and_clear_pending_msg_events();
2356 assert_eq!(events.len(), 1);
2357 match events[0] {
2358 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2359 assert!(update_add_htlcs.is_empty());
2360 assert!(update_fail_htlcs.is_empty());
2361 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2362 },
2363 _ => panic!("Unexpected event"),
2364 };
2365 }
2366 }
2367 }
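// (claim_funds! is just a convenience wrapper: it claims the given preimage on $node,
// checks the resulting monitor update, and asserts that the only generated message is an
// update_fulfill destined for $prev_node.)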
2369 // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2370 // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2371 nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2372 check_added_monitors!(nodes[2], 1);
2373 check_closed_broadcast!(nodes[2], true);
2374 let node2_commitment_txid;
2375 {
2376 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2377 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2378 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2379 node2_commitment_txid = node_txn[0].txid();
2381 // Claim the payment on nodes[3], giving it knowledge of the preimage
2382 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2383 mine_transaction(&nodes[3], &node_txn[0]);
2384 check_added_monitors!(nodes[3], 1);
2385 check_preimage_claim(&nodes[3], &node_txn);
2386 }
2387 check_closed_broadcast!(nodes[3], true);
2388 assert_eq!(nodes[2].node.list_channels().len(), 0);
2389 assert_eq!(nodes[3].node.list_channels().len(), 1);
2390 check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
2391 check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2393 // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2394 // confusing us in the following tests.
2395 let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2397 // One pending HTLC to time out:
2398 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2399 // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2400 // buffer)
2402 let (close_chan_update_1, close_chan_update_2) = {
2403 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2404 let events = nodes[3].node.get_and_clear_pending_msg_events();
2405 assert_eq!(events.len(), 2);
2406 let close_chan_update_1 = match events[1] {
2407 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2408 msg.clone()
2409 },
2410 _ => panic!("Unexpected event"),
2411 };
2412 match events[0] {
2413 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2414 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2415 },
2416 _ => panic!("Unexpected event"),
2417 }
2418 check_added_monitors!(nodes[3], 1);
2420 // Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer.
2421 {
2422 let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2423 node_txn.retain(|tx| {
2424 if tx.input[0].previous_output.txid == node2_commitment_txid {
2425 false
2426 } else { true }
2427 });
2428 }
2430 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2432 // Claim the payment on nodes[4], giving it knowledge of the preimage
2433 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2435 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2436 let events = nodes[4].node.get_and_clear_pending_msg_events();
2437 assert_eq!(events.len(), 2);
2438 let close_chan_update_2 = match events[1] {
2439 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2440 msg.clone()
2441 },
2442 _ => panic!("Unexpected event"),
2443 };
2444 match events[0] {
2445 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2446 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2447 },
2448 _ => panic!("Unexpected event"),
2449 }
2450 check_added_monitors!(nodes[4], 1);
2451 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2452 check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
2454 mine_transaction(&nodes[4], &node_txn[0]);
2455 check_preimage_claim(&nodes[4], &node_txn);
2456 (close_chan_update_1, close_chan_update_2)
2457 };
2458 nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2459 nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2460 assert_eq!(nodes[3].node.list_channels().len(), 0);
2461 assert_eq!(nodes[4].node.list_channels().len(), 0);
2463 assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2464 Ok(ChannelMonitorUpdateStatus::Completed));
2465 check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
2469 fn test_justice_tx_htlc_timeout() {
2470 // Test justice txn built on revoked HTLC-Timeout tx, against both sides
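// Flow: route an HTLC, capture the soon-to-be-revoked commitment tx (and its HTLC-Timeout
// spend), revoke that state by claiming the payment, then confirm the stale txn and check
// that each side sweeps the revoked outputs with a penalty (justice) transaction.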
2471 let mut alice_config = test_default_channel_config();
2472 alice_config.channel_handshake_config.announced_channel = true;
2473 alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2474 alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2475 let mut bob_config = test_default_channel_config();
2476 bob_config.channel_handshake_config.announced_channel = true;
2477 bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2478 bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2479 let user_cfgs = [Some(alice_config), Some(bob_config)];
2480 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2481 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2482 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2483 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2484 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2485 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2486 // Create some new channels:
2487 let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2489 // A pending HTLC which will be revoked:
2490 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2491 // Get the will-be-revoked local txn from nodes[0]
2492 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2493 assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2494 assert_eq!(revoked_local_txn[0].input.len(), 1);
2495 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2496 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2497 assert_eq!(revoked_local_txn[1].input.len(), 1);
2498 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2499 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2500 // Revoke the old state
2501 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2503 {
2504 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2505 {
2506 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2507 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2508 assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2509 check_spends!(node_txn[0], revoked_local_txn[0]);
2510 node_txn.swap_remove(0);
2511 }
2512 check_added_monitors!(nodes[1], 1);
2513 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2514 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2516 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2517 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2518 // Verify broadcast of revoked HTLC-timeout
2519 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2520 check_added_monitors!(nodes[0], 1);
2521 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2522 // Broadcast revoked HTLC-timeout on node 1
2523 mine_transaction(&nodes[1], &node_txn[1]);
2524 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2525 }
2526 get_announce_close_broadcast_events(&nodes, 0, 1);
2527 assert_eq!(nodes[0].node.list_channels().len(), 0);
2528 assert_eq!(nodes[1].node.list_channels().len(), 0);
2532 fn test_justice_tx_htlc_success() {
2533 // Test justice txn built on revoked HTLC-Success tx, against both sides
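// Mirror image of test_justice_tx_htlc_timeout above: here the revoked broadcaster is
// nodes[1], so the revoked HTLC claim is an HTLC-Success rather than an HTLC-Timeout.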
2534 let mut alice_config = test_default_channel_config();
2535 alice_config.channel_handshake_config.announced_channel = true;
2536 alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2537 alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2538 let mut bob_config = test_default_channel_config();
2539 bob_config.channel_handshake_config.announced_channel = true;
2540 bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2541 bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2542 let user_cfgs = [Some(alice_config), Some(bob_config)];
2543 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2544 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2545 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2546 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2547 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2548 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2549 // Create some new channels:
2550 let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2552 // A pending HTLC which will be revoked:
2553 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2554 // Get the will-be-revoked local txn from B
2555 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2556 assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2557 assert_eq!(revoked_local_txn[0].input.len(), 1);
2558 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2559 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2560 // Revoke the old state
2561 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2562 {
2563 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2564 {
2565 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2566 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2567 assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2569 check_spends!(node_txn[0], revoked_local_txn[0]);
2570 node_txn.swap_remove(0);
2571 }
2572 check_added_monitors!(nodes[0], 1);
2573 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2575 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2576 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2577 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2578 check_added_monitors!(nodes[1], 1);
2579 mine_transaction(&nodes[0], &node_txn[1]);
2580 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2581 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2582 }
2583 get_announce_close_broadcast_events(&nodes, 0, 1);
2584 assert_eq!(nodes[0].node.list_channels().len(), 0);
2585 assert_eq!(nodes[1].node.list_channels().len(), 0);
2589 fn revoked_output_claim() {
2590 // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2591 // transaction is broadcast by its counterparty
2592 let chanmon_cfgs = create_chanmon_cfgs(2);
2593 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2594 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2595 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2596 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2597 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output
2598 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2599 assert_eq!(revoked_local_txn.len(), 1);
2600 // Only output is the full channel value back to nodes[0]:
2601 assert_eq!(revoked_local_txn[0].output.len(), 1);
2602 // Send a payment through, updating everyone's latest commitment txn
2603 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2605 // Inform nodes[1] that nodes[0] broadcast a stale tx
2606 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2607 check_added_monitors!(nodes[1], 1);
2608 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2609 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2610 assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2612 check_spends!(node_txn[0], revoked_local_txn[0]);
2614 // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2615 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2616 get_announce_close_broadcast_events(&nodes, 0, 1);
2617 check_added_monitors!(nodes[0], 1);
2618 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2622 fn test_forming_justice_tx_from_monitor_updates() {
2623 do_test_forming_justice_tx_from_monitor_updates(true);
2624 do_test_forming_justice_tx_from_monitor_updates(false);
2627 fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
2628 // Simple test to make sure that the justice tx formed in WatchtowerPersister
2629 // is properly formed and can be broadcasted/confirmed successfully in the event
2630 // that a revoked commitment transaction is broadcasted
2631 // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually)
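// (The WatchtowerPersister test-util records a signed justice transaction for each
// counterparty commitment it is shown, which is what lets us fetch one directly below
// via persisters[1].justice_tx(..) instead of waiting for the ChannelMonitor to react.)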
2632 let chanmon_cfgs = create_chanmon_cfgs(2);
2633 let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap();
2634 let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap();
2635 let persisters = vec![WatchtowerPersister::new(destination_script0),
2636 WatchtowerPersister::new(destination_script1)];
2637 let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
2638 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2639 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2640 let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2641 let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
2643 if !broadcast_initial_commitment {
2644 // Send a payment to move the channel forward
2645 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2646 }
2648 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output.
2649 // We'll keep this commitment transaction to broadcast once it's revoked.
2650 let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
2651 assert_eq!(revoked_local_txn.len(), 1);
2652 let revoked_commitment_tx = &revoked_local_txn[0];
2654 // Send another payment, now revoking the previous commitment tx
2655 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2657 let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2658 check_spends!(justice_tx, revoked_commitment_tx);
2660 mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2661 mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2663 check_added_monitors!(nodes[1], 1);
2664 check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2665 &[nodes[0].node.get_our_node_id()], 100_000);
2666 get_announce_close_broadcast_events(&nodes, 1, 0);
2668 check_added_monitors!(nodes[0], 1);
2669 check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2670 &[nodes[1].node.get_our_node_id()], 100_000);
2672 // Check that the justice tx has sent the revoked output value to nodes[1]
2673 let monitor = get_monitor!(nodes[1], channel_id);
2674 let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
2675 match balance {
2676 channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2677 _ => panic!("Unexpected balance type"),
2678 }
2679 });
2680 // On the first commitment, node[1]'s balance was below dust so it didn't have an output
2681 let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
2682 let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
2683 assert_eq!(total_claimable_balance, expected_claimable_balance);
2688 fn claim_htlc_outputs_shared_tx() {
2689 // Node revoked old state, HTLCs haven't timed out yet; claim them in a shared justice tx
2690 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2691 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2692 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2693 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2694 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2696 // Create some new channel:
2697 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2699 // Rebalance the network to generate htlc in the two directions
2700 send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2701 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2702 let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2703 let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2705 // Get the will-be-revoked local txn from node[0]
2706 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2707 assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2708 assert_eq!(revoked_local_txn[0].input.len(), 1);
2709 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2710 assert_eq!(revoked_local_txn[1].input.len(), 1);
2711 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2712 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2713 check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2715 // Revoke the old state
2716 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2718 {
2719 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2720 check_added_monitors!(nodes[0], 1);
2721 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2722 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2723 check_added_monitors!(nodes[1], 1);
2724 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2725 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2726 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
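// Nothing is resolved yet: on-chain HTLC failures are only surfaced to the user after
// ANTI_REORG_DELAY confirmations, which we re-check below once the penalty tx confirms.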
2728 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2729 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2731 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2732 check_spends!(node_txn[0], revoked_local_txn[0]);
2734 let mut witness_lens = BTreeSet::new();
2735 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2736 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2737 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2738 assert_eq!(witness_lens.len(), 3);
2739 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2740 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2741 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
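// (Comparing witness-script lengths is a cheap way to tell the three claim types apart:
// the revoked to_local script, the offered-HTLC script, and the accepted-HTLC script all
// have distinct sizes.)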
2743 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2744 // ANTI_REORG_DELAY confirmations.
2745 mine_transaction(&nodes[1], &node_txn[0]);
2746 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2747 expect_payment_failed!(nodes[1], payment_hash_2, false);
2748 }
2749 get_announce_close_broadcast_events(&nodes, 0, 1);
2750 assert_eq!(nodes[0].node.list_channels().len(), 0);
2751 assert_eq!(nodes[1].node.list_channels().len(), 0);
2755 fn claim_htlc_outputs_single_tx() {
2756 // Node revoked old state, HTLCs have timed out; claim each of them in a separate justice tx
2757 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2758 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2759 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2760 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2761 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2763 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2765 // Rebalance the network to generate htlc in the two directions
2766 send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2767 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2768 // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
2769 let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2770 let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2772 // Get the will-be-revoked local txn from node[0]
2773 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2775 // Revoke the old state
2776 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2778 {
2779 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2780 check_added_monitors!(nodes[0], 1);
2781 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2782 check_added_monitors!(nodes[1], 1);
2783 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2784 let mut events = nodes[0].node.get_and_clear_pending_events();
2785 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
2786 match events.last().unwrap() {
2787 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2788 _ => panic!("Unexpected event"),
2789 }
2791 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2792 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2794 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2796 // Check the pair of local commitment and HTLC-timeout txn broadcast due to HTLC expiration
2797 assert_eq!(node_txn[0].input.len(), 1);
2798 check_spends!(node_txn[0], chan_1.3);
2799 assert_eq!(node_txn[1].input.len(), 1);
2800 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2801 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered htlc output
2802 check_spends!(node_txn[1], node_txn[0]);
2804 // Filter out any non justice transactions.
2805 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2806 assert!(node_txn.len() > 3);
2808 assert_eq!(node_txn[0].input.len(), 1);
2809 assert_eq!(node_txn[1].input.len(), 1);
2810 assert_eq!(node_txn[2].input.len(), 1);
2812 check_spends!(node_txn[0], revoked_local_txn[0]);
2813 check_spends!(node_txn[1], revoked_local_txn[0]);
2814 check_spends!(node_txn[2], revoked_local_txn[0]);
2816 let mut witness_lens = BTreeSet::new();
2817 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2818 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2819 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2820 assert_eq!(witness_lens.len(), 3);
2821 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2822 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2823 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2825 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2826 // ANTI_REORG_DELAY confirmations.
2827 mine_transaction(&nodes[1], &node_txn[0]);
2828 mine_transaction(&nodes[1], &node_txn[1]);
2829 mine_transaction(&nodes[1], &node_txn[2]);
2830 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2831 expect_payment_failed!(nodes[1], payment_hash_2, false);
2832 }
2833 get_announce_close_broadcast_events(&nodes, 0, 1);
2834 assert_eq!(nodes[0].node.list_channels().len(), 0);
2835 assert_eq!(nodes[1].node.list_channels().len(), 0);
2839 fn test_htlc_on_chain_success() {
2840 // Test that in case of a unilateral close onchain, we detect the state of the output and pass
2841 // the preimage backward accordingly. So here we test that ChannelManager is
2842 // broadcasting the right event to other nodes in the payment path.
2843 // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2844 // A --------------------> B ----------------------> C (preimage)
2845 // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2846 // commitment transaction was broadcast.
2847 // Then, B should learn the preimage from said transactions, attempting to claim backwards
2849 // B should be able to claim via preimage if A then broadcasts its local tx.
2850 // Finally, when A sees B's latest local commitment transaction it should be able to claim
2851 // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2852 // PaymentSent event).
2854 let chanmon_cfgs = create_chanmon_cfgs(3);
2855 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2856 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2857 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2859 // Create some initial channels
2860 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2861 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2863 // Ensure all nodes are at the same height
2864 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2865 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2866 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2867 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2869 // Rebalance the network a bit by relaying one payment through all the channels...
2870 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2871 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2873 let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2874 let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2876 // Broadcast legit commitment tx from C on B's chain
2877 // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2878 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2879 assert_eq!(commitment_tx.len(), 1);
2880 check_spends!(commitment_tx[0], chan_2.3);
2881 nodes[2].node.claim_funds(our_payment_preimage);
2882 expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2883 nodes[2].node.claim_funds(our_payment_preimage_2);
2884 expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2885 check_added_monitors!(nodes[2], 2);
2886 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2887 assert!(updates.update_add_htlcs.is_empty());
2888 assert!(updates.update_fail_htlcs.is_empty());
2889 assert!(updates.update_fail_malformed_htlcs.is_empty());
2890 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2892 mine_transaction(&nodes[2], &commitment_tx[0]);
2893 check_closed_broadcast!(nodes[2], true);
2894 check_added_monitors!(nodes[2], 1);
2895 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2896 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2897 assert_eq!(node_txn.len(), 2);
2898 check_spends!(node_txn[0], commitment_tx[0]);
2899 check_spends!(node_txn[1], commitment_tx[0]);
2900 assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2901 assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2902 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2903 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2904 assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
2905 assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
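// HTLC-Success claims need no CLTV lock (they spend with the preimage), hence the zero
// lock_time here; HTLC-Timeout transactions must instead set lock_time to the HTLC's
// cltv_expiry, which is why the timeout claims checked below assert a non-zero lock_time.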
2907 // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2908 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2909 connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2910 {
2911 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2912 assert_eq!(added_monitors.len(), 1);
2913 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2914 added_monitors.clear();
2915 }
2916 let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2917 assert_eq!(forwarded_events.len(), 3);
2918 match forwarded_events[0] {
2919 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2920 _ => panic!("Unexpected event"),
2921 }
2922 let chan_id = Some(chan_1.2);
2923 match forwarded_events[1] {
2924 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2925 next_channel_id, outbound_amount_forwarded_msat, ..
2926 } => {
2927 assert_eq!(total_fee_earned_msat, Some(1000));
2928 assert_eq!(prev_channel_id, chan_id);
2929 assert_eq!(claim_from_onchain_tx, true);
2930 assert_eq!(next_channel_id, Some(chan_2.2));
2931 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2932 },
2933 _ => panic!("Unexpected event"),
2934 }
2935 match forwarded_events[2] {
2936 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2937 next_channel_id, outbound_amount_forwarded_msat, ..
2938 } => {
2939 assert_eq!(total_fee_earned_msat, Some(1000));
2940 assert_eq!(prev_channel_id, chan_id);
2941 assert_eq!(claim_from_onchain_tx, true);
2942 assert_eq!(next_channel_id, Some(chan_2.2));
2943 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2944 },
2945 _ => panic!("Unexpected event"),
2946 }
2947 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2948 {
2949 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2950 assert_eq!(added_monitors.len(), 2);
2951 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2952 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2953 added_monitors.clear();
2954 }
2955 assert_eq!(events.len(), 3);
2957 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2958 let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2960 match nodes_2_event {
2961 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
2962 _ => panic!("Unexpected event"),
2963 }
2965 match nodes_0_event {
2966 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2967 assert!(update_add_htlcs.is_empty());
2968 assert!(update_fail_htlcs.is_empty());
2969 assert_eq!(update_fulfill_htlcs.len(), 1);
2970 assert!(update_fail_malformed_htlcs.is_empty());
2971 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2972 },
2973 _ => panic!("Unexpected event"),
2974 }
2976 // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2977 match events[0] {
2978 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2979 _ => panic!("Unexpected event"),
2980 }
2982 macro_rules! check_tx_local_broadcast {
2983 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2984 let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2985 assert_eq!(node_txn.len(), 2);
2986 // Node[1]: 2 * HTLC-timeout tx
2987 // Node[0]: 2 * HTLC-timeout tx
2988 check_spends!(node_txn[0], $commitment_tx);
2989 check_spends!(node_txn[1], $commitment_tx);
2990 assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
2991 assert_ne!(node_txn[1].lock_time, LockTime::ZERO);
2992 if $htlc_offered {
2993 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2994 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2995 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2996 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2997 } else {
2998 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2999 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3000 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3001 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3002 }
3003 node_txn.clear();
3004 } }
3005 }
3006 // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
3007 check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
3009 // Broadcast legit commitment tx from A on B's chain
3010 // Broadcast preimage tx by B on offered output from A commitment tx on A's chain
3011 let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
3012 check_spends!(node_a_commitment_tx[0], chan_1.3);
3013 mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
3014 check_closed_broadcast!(nodes[1], true);
3015 check_added_monitors!(nodes[1], 1);
3016 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3017 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3018 assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
3019 let commitment_spend =
3020 if node_txn.len() == 1 {
3021 &node_txn[0]
3022 } else {
3023 // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast.
3024 // FullBlockViaListen
3025 if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
3026 check_spends!(node_txn[1], commitment_tx[0]);
3027 check_spends!(node_txn[2], commitment_tx[0]);
3028 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
3029 &node_txn[0]
3030 } else {
3031 check_spends!(node_txn[0], commitment_tx[0]);
3032 check_spends!(node_txn[1], commitment_tx[0]);
3033 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
3034 &node_txn[2]
3035 }
3036 };
3038 check_spends!(commitment_spend, node_a_commitment_tx[0]);
3039 assert_eq!(commitment_spend.input.len(), 2);
3040 assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3041 assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3042 assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
3043 assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
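// (The lock_time check above reflects claims being locked to the current best height,
// a standard anti-fee-sniping measure.)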
3044 // We don't bother to check that B can claim the HTLC output on its commitment tx here as
3045 // we already checked the same situation with A.
3047 // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
3048 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
3049 connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3050 check_closed_broadcast!(nodes[0], true);
3051 check_added_monitors!(nodes[0], 1);
3052 let events = nodes[0].node.get_and_clear_pending_events();
3053 assert_eq!(events.len(), 5);
3054 let mut first_claimed = false;
3055 for event in events {
3056 match event {
3057 Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3058 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
3059 assert!(!first_claimed);
3060 first_claimed = true;
3061 } else {
3062 assert_eq!(payment_preimage, our_payment_preimage_2);
3063 assert_eq!(payment_hash, payment_hash_2);
3064 }
3065 },
3066 Event::PaymentPathSuccessful { .. } => {},
3067 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3068 _ => panic!("Unexpected event"),
3069 }
3070 }
3071 check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
3074 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3075 // Test that in case of a unilateral close onchain, we detect the state of the output and
3076 // time out the HTLC backward accordingly. So here we test that ChannelManager is
3077 // broadcasting the right event to other nodes in the payment path.
3078 // A ------------------> B ----------------------> C (timeout)
3079 //   B's commitment tx            C's commitment tx
3080 //        \                            \
3081 //   B's HTLC timeout tx          B's timeout tx
3083 let chanmon_cfgs = create_chanmon_cfgs(3);
3084 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3085 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3086 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3087 *nodes[0].connect_style.borrow_mut() = connect_style;
3088 *nodes[1].connect_style.borrow_mut() = connect_style;
3089 *nodes[2].connect_style.borrow_mut() = connect_style;
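// This helper runs under several ConnectStyles (see test_htlc_on_chain_timeout below):
// styles that skip blocks broadcast fewer fee-bumped copies of the timeout tx, which the
// transaction-count checks below account for.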
3091 // Create some initial channels
3092 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3093 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3095 // Rebalance the network a bit by relaying one payment through all the channels...
3096 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3097 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3099 let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
3101 // Broadcast legit commitment tx from C on B's chain
3102 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3103 check_spends!(commitment_tx[0], chan_2.3);
3104 nodes[2].node.fail_htlc_backwards(&payment_hash);
3105 check_added_monitors!(nodes[2], 0);
3106 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3107 check_added_monitors!(nodes[2], 1);
3109 let events = nodes[2].node.get_and_clear_pending_msg_events();
3110 assert_eq!(events.len(), 1);
3111 match events[0] {
3112 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3113 assert!(update_add_htlcs.is_empty());
3114 assert!(!update_fail_htlcs.is_empty());
3115 assert!(update_fulfill_htlcs.is_empty());
3116 assert!(update_fail_malformed_htlcs.is_empty());
3117 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3118 },
3119 _ => panic!("Unexpected event"),
3120 }
3121 mine_transaction(&nodes[2], &commitment_tx[0]);
3122 check_closed_broadcast!(nodes[2], true);
3123 check_added_monitors!(nodes[2], 1);
3124 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3125 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3126 assert_eq!(node_txn.len(), 0);
3128 // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3129 // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
3130 mine_transaction(&nodes[1], &commitment_tx[0]);
3131 check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
3132 , [nodes[2].node.get_our_node_id()], 100000);
3133 connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3134 let timeout_tx = {
3135 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3136 if nodes[1].connect_style.borrow().skips_blocks() {
3137 assert_eq!(txn.len(), 1);
3138 } else {
3139 assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3140 }
3141 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3142 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3143 txn.remove(0)
3144 };
3146 mine_transaction(&nodes[1], &timeout_tx);
3147 check_added_monitors!(nodes[1], 1);
3148 check_closed_broadcast!(nodes[1], true);
3150 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3152 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3153 check_added_monitors!(nodes[1], 1);
3154 let events = nodes[1].node.get_and_clear_pending_msg_events();
3155 assert_eq!(events.len(), 1);
3156 match events[0] {
3157 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3158 assert!(update_add_htlcs.is_empty());
3159 assert!(!update_fail_htlcs.is_empty());
3160 assert!(update_fulfill_htlcs.is_empty());
3161 assert!(update_fail_malformed_htlcs.is_empty());
3162 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3163 },
3164 _ => panic!("Unexpected event"),
3165 }
3167 // Broadcast legit commitment tx from B on A's chain
3168 let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3169 check_spends!(commitment_tx[0], chan_1.3);
3171 mine_transaction(&nodes[0], &commitment_tx[0]);
3172 connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3174 check_closed_broadcast!(nodes[0], true);
3175 check_added_monitors!(nodes[0], 1);
3176 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3177 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3178 assert_eq!(node_txn.len(), 1);
3179 check_spends!(node_txn[0], commitment_tx[0]);
3180 assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3184 fn test_htlc_on_chain_timeout() {
3185 do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3186 do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3187 do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3191 fn test_simple_commitment_revoked_fail_backward() {
3192 // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
3193 // and fail backward accordingly.
3195 let chanmon_cfgs = create_chanmon_cfgs(3);
3196 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3197 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3198 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3200 // Create some initial channels
3201 create_announced_chan_between_nodes(&nodes, 0, 1);
3202 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3204 let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3205 // Get the will-be-revoked local txn from nodes[2]
3206 let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3207 // Revoke the old state
3208 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3210 let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3212 mine_transaction(&nodes[1], &revoked_local_txn[0]);
3213 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3214 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3215 check_added_monitors!(nodes[1], 1);
3216 check_closed_broadcast!(nodes[1], true);
3218 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3219 check_added_monitors!(nodes[1], 1);
3220 let events = nodes[1].node.get_and_clear_pending_msg_events();
3221 assert_eq!(events.len(), 1);
3222 match events[0] {
3223 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3224 assert!(update_add_htlcs.is_empty());
3225 assert_eq!(update_fail_htlcs.len(), 1);
3226 assert!(update_fulfill_htlcs.is_empty());
3227 assert!(update_fail_malformed_htlcs.is_empty());
3228 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3230 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3231 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3232 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3233 },
3234 _ => panic!("Unexpected event"),
3235 }
3238 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3239 // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3240 // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3241 // commitment transaction anymore.
3242 // To do this, we have the peer which will broadcast a revoked commitment transaction send
3243 // a number of update_fail/commitment_signed updates without ever sending the RAA in
3244 // response to our commitment_signed. This is somewhat misbehavior-y, though not
3245 // technically disallowed and we should probably handle it reasonably.
3246 // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3247 // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3248 // transactions:
3249 // * Once we move it out of our holding cell/add it, we will immediately include it in a
3250 // commitment_signed (implying it will be in the latest remote commitment transaction).
3251 // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3252 // and once they revoke the previous commitment transaction (allowing us to send a new
3253 // commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3254 let chanmon_cfgs = create_chanmon_cfgs(3);
3255 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3256 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3257 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3259 // Create some initial channels
3260 create_announced_chan_between_nodes(&nodes, 0, 1);
3261 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3263 let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3264 // Get the will-be-revoked local txn from nodes[2]
3265 let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3266 assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3267 // Revoke the old state
3268 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3270 let value = if use_dust {
3271 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3272 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3273 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
	.unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
} else { 3_000_000 };
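// Hedged sketch of the trimming rule relied on above (illustrative; the fee handling is an
// assumption for this sketch, not LDK's exact code): for non-anchor channels an offered HTLC
// is left out of the commitment tx when its value can't cover the dust limit plus the fee of
// its HTLC-Timeout transaction, so an HTLC at exactly the dust limit is always trimmed.
#[allow(dead_code)]
fn sketch_offered_htlc_is_trimmed(
	htlc_amount_sat: u64, dust_limit_sat: u64, feerate_per_kw: u64, htlc_timeout_weight: u64,
) -> bool {
	// BOLT 3-style fee: weight * feerate_per_kw / 1000.
	let second_stage_fee_sat = htlc_timeout_weight * feerate_per_kw / 1000;
	htlc_amount_sat < dust_limit_sat + second_stage_fee_sat
}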
3277 let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3278 let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3279 let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3281 nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3282 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3283 check_added_monitors!(nodes[2], 1);
3284 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3285 assert!(updates.update_add_htlcs.is_empty());
3286 assert!(updates.update_fulfill_htlcs.is_empty());
3287 assert!(updates.update_fail_malformed_htlcs.is_empty());
3288 assert_eq!(updates.update_fail_htlcs.len(), 1);
3289 assert!(updates.update_fee.is_none());
3290 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3291 let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3292 // Drop the last RAA from 3 -> 2
3294 nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3295 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3296 check_added_monitors!(nodes[2], 1);
3297 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3298 assert!(updates.update_add_htlcs.is_empty());
3299 assert!(updates.update_fulfill_htlcs.is_empty());
3300 assert!(updates.update_fail_malformed_htlcs.is_empty());
3301 assert_eq!(updates.update_fail_htlcs.len(), 1);
3302 assert!(updates.update_fee.is_none());
3303 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3304 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3305 check_added_monitors!(nodes[1], 1);
3306 // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3307 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3308 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3309 check_added_monitors!(nodes[2], 1);
3311 nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3312 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3313 check_added_monitors!(nodes[2], 1);
3314 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3315 assert!(updates.update_add_htlcs.is_empty());
3316 assert!(updates.update_fulfill_htlcs.is_empty());
3317 assert!(updates.update_fail_malformed_htlcs.is_empty());
3318 assert_eq!(updates.update_fail_htlcs.len(), 1);
3319 assert!(updates.update_fee.is_none());
3320 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3321 // At this point first_payment_hash has dropped out of the latest two commitment
3322 // transactions that nodes[1] is tracking...
3323 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3324 check_added_monitors!(nodes[1], 1);
3325 // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3326 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3327 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3328 check_added_monitors!(nodes[2], 1);
3330 // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3331 // on nodes[2]'s RAA.
3332 let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3333 nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3334 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3335 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3336 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3337 check_added_monitors!(nodes[1], 0);
3340 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
// One monitor for the new revocation preimage, no second one as we won't generate a new
// commitment transaction for nodes[0] until process_pending_htlc_forwards().
3343 check_added_monitors!(nodes[1], 1);
3344 let events = nodes[1].node.get_and_clear_pending_events();
3345 assert_eq!(events.len(), 2);
match events[0] {
	Event::HTLCHandlingFailed { .. } => { },
	_ => panic!("Unexpected event"),
}
match events[1] {
	Event::PendingHTLCsForwardable { .. } => { },
	_ => panic!("Unexpected event"),
}
3354 // Deliberately don't process the pending fail-back so they all fail back at once after
3355 // block connection just like the !deliver_bs_raa case
3358 let mut failed_htlcs = new_hash_set();
3359 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3361 mine_transaction(&nodes[1], &revoked_local_txn[0]);
3362 check_added_monitors!(nodes[1], 1);
3363 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3365 let events = nodes[1].node.get_and_clear_pending_events();
3366 assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
assert!(events.iter().any(|ev| matches!(
	ev,
	Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
)));
assert!(events.iter().any(|ev| matches!(
	ev,
	Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
)));
assert!(events.iter().any(|ev| matches!(
	ev,
	Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
)));
3380 nodes[1].node.process_pending_htlc_forwards();
3381 check_added_monitors!(nodes[1], 1);
3383 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3384 assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3387 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3388 match nodes_2_event {
3389 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3390 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3391 assert_eq!(update_add_htlcs.len(), 1);
3392 assert!(update_fulfill_htlcs.is_empty());
3393 assert!(update_fail_htlcs.is_empty());
3394 assert!(update_fail_malformed_htlcs.is_empty());
3396 _ => panic!("Unexpected event"),
3400 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3401 match nodes_2_event {
3402 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
3403 assert_eq!(channel_id, chan_2.2);
3404 assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3406 _ => panic!("Unexpected event"),
3409 let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3410 match nodes_0_event {
3411 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3412 assert!(update_add_htlcs.is_empty());
3413 assert_eq!(update_fail_htlcs.len(), 3);
3414 assert!(update_fulfill_htlcs.is_empty());
3415 assert!(update_fail_malformed_htlcs.is_empty());
3416 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3418 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3419 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3420 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3422 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3424 let events = nodes[0].node.get_and_clear_pending_events();
3425 assert_eq!(events.len(), 6);
3427 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3428 assert!(failed_htlcs.insert(payment_hash.0));
3429 // If we delivered B's RAA we got an unknown preimage error, not something
3430 // that we should update our routing table for.
3431 if !deliver_bs_raa {
3432 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3435 _ => panic!("Unexpected event"),
3438 Event::PaymentFailed { ref payment_hash, .. } => {
3439 assert_eq!(*payment_hash, first_payment_hash);
3441 _ => panic!("Unexpected event"),
3444 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3445 assert!(failed_htlcs.insert(payment_hash.0));
3447 _ => panic!("Unexpected event"),
3450 Event::PaymentFailed { ref payment_hash, .. } => {
3451 assert_eq!(*payment_hash, second_payment_hash);
3453 _ => panic!("Unexpected event"),
3456 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3457 assert!(failed_htlcs.insert(payment_hash.0));
3459 _ => panic!("Unexpected event"),
3462 Event::PaymentFailed { ref payment_hash, .. } => {
3463 assert_eq!(*payment_hash, third_payment_hash);
3465 _ => panic!("Unexpected event"),
3468 _ => panic!("Unexpected event"),
3471 // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3473 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3474 _ => panic!("Unexpected event"),
3477 assert!(failed_htlcs.contains(&first_payment_hash.0));
3478 assert!(failed_htlcs.contains(&second_payment_hash.0));
3479 assert!(failed_htlcs.contains(&third_payment_hash.0));
3483 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3484 do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3485 do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3486 do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3487 do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3491 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3492 do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3493 do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3494 do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3495 do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
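// Hedged aside: the two test wrappers above together enumerate the full
// (deliver_bs_raa, use_dust, no_to_remote) boolean cube; an equivalent single driver would
// look like the sketch below (illustrative, not wired up as a test).
#[allow(dead_code)]
fn sketch_run_commitment_revoked_matrix() {
	for &deliver_bs_raa in &[false, true] {
		for &use_dust in &[false, true] {
			for &no_to_remote in &[false, true] {
				do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa, use_dust, no_to_remote);
			}
		}
	}
}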
3499 fn fail_backward_pending_htlc_upon_channel_failure() {
3500 let chanmon_cfgs = create_chanmon_cfgs(2);
3501 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3502 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3503 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3504 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3506 // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3508 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3509 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3510 PaymentId(payment_hash.0)).unwrap();
3511 check_added_monitors!(nodes[0], 1);
3513 let payment_event = {
3514 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3515 assert_eq!(events.len(), 1);
3516 SendEvent::from_event(events.remove(0))
3518 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3519 assert_eq!(payment_event.msgs.len(), 1);
3522 // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3523 let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3525 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3526 RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3527 check_added_monitors!(nodes[0], 0);
3529 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3532 // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3534 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3536 let secp_ctx = Secp256k1::new();
3537 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3538 let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
3539 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
3540 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3541 &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None).unwrap();
3542 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3543 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
3545 // Send a 0-msat update_add_htlc to fail the channel.
let update_add_htlc = msgs::UpdateAddHTLC {
	channel_id: chan.2,
	htlc_id: 0,
	amount_msat: 0,
	payment_hash,
	cltv_expiry,
	onion_routing_packet,
	skimmed_fee_msat: None,
	blinding_point: None,
};
3556 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
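// Hedged sketch of the check being triggered (illustrative; not LDK's actual validation
// path): BOLT 2 requires the receiver to fail the channel on an update_add_htlc with
// amount_msat == 0, which is what the error message asserted below reflects.
#[allow(dead_code)]
fn sketch_validate_htlc_amount(amount_msat: u64) -> Result<(), &'static str> {
	if amount_msat == 0 {
		return Err("Remote side tried to send a 0-msat HTLC");
	}
	Ok(())
}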
3558 let events = nodes[0].node.get_and_clear_pending_events();
3559 assert_eq!(events.len(), 3);
3560 // Check that Alice fails backward the pending HTLC from the second payment.
3562 Event::PaymentPathFailed { payment_hash, .. } => {
3563 assert_eq!(payment_hash, failed_payment_hash);
3565 _ => panic!("Unexpected event"),
3568 Event::PaymentFailed { payment_hash, .. } => {
3569 assert_eq!(payment_hash, failed_payment_hash);
3571 _ => panic!("Unexpected event"),
3574 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3575 assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
_ => panic!("Unexpected event {:?}", events[2]),
3579 check_closed_broadcast!(nodes[0], true);
3580 check_added_monitors!(nodes[0], 1);
3584 fn test_htlc_ignore_latest_remote_commitment() {
3585 // Test that HTLC transactions spending the latest remote commitment transaction are simply
3586 // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3587 let chanmon_cfgs = create_chanmon_cfgs(2);
3588 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3589 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3590 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3591 if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3592 // We rely on the ability to connect a block redundantly, which isn't allowed via
// `chain::Listen`, so we never run the test if we randomly get assigned that
// connect_style.
return;
}
3597 let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
3599 route_payment(&nodes[0], &[&nodes[1]], 10000000);
3600 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3601 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3602 check_closed_broadcast!(nodes[0], true);
3603 check_added_monitors!(nodes[0], 1);
3604 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3606 let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
3607 assert_eq!(node_txn.len(), 2);
3608 check_spends!(node_txn[0], funding_tx);
3609 check_spends!(node_txn[1], node_txn[0]);
3611 let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
3612 connect_block(&nodes[1], &block);
3613 check_closed_broadcast!(nodes[1], true);
3614 check_added_monitors!(nodes[1], 1);
3615 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3617 // Duplicate the connect_block call since this may happen due to other listeners
3618 // registering new transactions
3619 connect_block(&nodes[1], &block);
3623 fn test_force_close_fail_back() {
3624 // Check which HTLCs are failed-backwards on channel force-closure
3625 let chanmon_cfgs = create_chanmon_cfgs(3);
3626 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3627 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3628 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3629 create_announced_chan_between_nodes(&nodes, 0, 1);
3630 create_announced_chan_between_nodes(&nodes, 1, 2);
3632 let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3634 let mut payment_event = {
3635 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3636 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3637 check_added_monitors!(nodes[0], 1);
3639 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3640 assert_eq!(events.len(), 1);
3641 SendEvent::from_event(events.remove(0))
3644 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3645 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3647 expect_pending_htlcs_forwardable!(nodes[1]);
3649 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3650 assert_eq!(events_2.len(), 1);
3651 payment_event = SendEvent::from_event(events_2.remove(0));
3652 assert_eq!(payment_event.msgs.len(), 1);
3654 check_added_monitors!(nodes[1], 1);
3655 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3656 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3657 check_added_monitors!(nodes[2], 1);
3658 let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3660 // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3662 // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3664 nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3665 check_closed_broadcast!(nodes[2], true);
3666 check_added_monitors!(nodes[2], 1);
3667 check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3668 let commitment_tx = {
3669 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// have a use for it unless nodes[2] learns the preimage somehow; the funds will go
// back to nodes[1] upon timeout otherwise.
assert_eq!(node_txn.len(), 1);
node_txn.remove(0)
};
3677 mine_transaction(&nodes[1], &commitment_tx);
3679 // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3680 check_closed_broadcast!(nodes[1], true);
3681 check_added_monitors!(nodes[1], 1);
3682 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success...
3686 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3687 .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3689 mine_transaction(&nodes[2], &commitment_tx);
3690 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
3691 assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
3692 let htlc_tx = node_txn.pop().unwrap();
3693 assert_eq!(htlc_tx.input.len(), 1);
3694 assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
3695 assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
3696 assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
3698 check_spends!(htlc_tx, commitment_tx);
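// Hedged aside on the two "Must be an HTLC-Success" checks above (illustrative): BOLT 3
// second-stage HTLC-Timeout transactions set nLockTime to the HTLC's cltv_expiry, while
// HTLC-Success transactions leave it at zero, so the locktime alone distinguishes them.
#[allow(dead_code)]
fn sketch_is_htlc_success_locktime(lock_time: u32) -> bool {
	lock_time == 0
}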
3702 fn test_dup_events_on_peer_disconnect() {
3703 // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
// not generate a corresponding duplicative PaymentSent event. This was not always the case,
// as we used to generate the event immediately upon receipt of the payment preimage in the
3706 // update_fulfill_htlc message.
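// Hedged sketch of the dedup behavior under test (illustrative; not LDK's actual
// bookkeeping): remember which payments already produced a PaymentSent so a redelivered
// update_fulfill_htlc after reconnect yields no second event.
#[allow(dead_code)]
fn sketch_should_emit_payment_sent(
	seen: &mut std::collections::HashSet<[u8; 32]>, payment_id: [u8; 32],
) -> bool {
	// `insert` returns false when the id was already present, i.e. a duplicate delivery.
	seen.insert(payment_id)
}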
3708 let chanmon_cfgs = create_chanmon_cfgs(2);
3709 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3710 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3711 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3712 create_announced_chan_between_nodes(&nodes, 0, 1);
3714 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3716 nodes[1].node.claim_funds(payment_preimage);
3717 expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3718 check_added_monitors!(nodes[1], 1);
3719 let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3720 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3721 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
3723 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3724 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3726 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3727 reconnect_args.pending_htlc_claims.0 = 1;
3728 reconnect_nodes(reconnect_args);
3729 expect_payment_path_successful!(nodes[0]);
3733 fn test_peer_disconnected_before_funding_broadcasted() {
3734 // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
// before the funding transaction has been broadcasted, and doesn't reconnect in time.
3736 let chanmon_cfgs = create_chanmon_cfgs(2);
3737 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3738 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3739 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3741 // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3742 // broadcasted, even though it's created by `nodes[0]`.
3743 let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
3744 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3745 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3746 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3747 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3749 let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3750 assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3752 assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3754 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3755 assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3757 // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
// never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
// broadcasted.
3761 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3764 // The peers disconnect before the funding is broadcasted.
3765 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3766 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3768 // The time for peers to reconnect expires.
3769 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
3770 nodes[0].node.timer_tick_occurred();
3773 // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a
3774 // `DiscardFunding` event when the peers are disconnected and do not reconnect before the
3775 // funding transaction is broadcasted.
check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true,
	[nodes[1].node.get_our_node_id()], 1000000);
check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
	[nodes[0].node.get_our_node_id()], 1000000);
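// Hedged sketch of the timer-based expiry exercised above (illustrative): an unfunded
// channel is discarded once its age, counted in timer_tick_occurred calls, reaches
// UNFUNDED_CHANNEL_AGE_LIMIT_TICKS.
#[allow(dead_code)]
fn sketch_unfunded_channel_expired(age_ticks: usize, limit_ticks: usize) -> bool {
	age_ticks >= limit_ticks
}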
3783 fn test_simple_peer_disconnect() {
3784 // Test that we can reconnect when there are no lost messages
3785 let chanmon_cfgs = create_chanmon_cfgs(3);
3786 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3787 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3788 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3789 create_announced_chan_between_nodes(&nodes, 0, 1);
3790 create_announced_chan_between_nodes(&nodes, 1, 2);
3792 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3793 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3794 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3795 reconnect_args.send_channel_ready = (true, true);
3796 reconnect_nodes(reconnect_args);
3798 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3799 let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3800 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3801 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3803 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3804 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3805 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3807 let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3808 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3809 let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3810 let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3812 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3813 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3815 claim_payment_along_route(
	ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3)
);
3819 fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
3821 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3822 reconnect_args.pending_cell_htlc_fails.0 = 1;
3823 reconnect_args.pending_cell_htlc_claims.0 = 1;
3824 reconnect_nodes(reconnect_args);
3826 let events = nodes[0].node.get_and_clear_pending_events();
3827 assert_eq!(events.len(), 4);
3829 Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3830 assert_eq!(payment_preimage, payment_preimage_3);
3831 assert_eq!(payment_hash, payment_hash_3);
3833 _ => panic!("Unexpected event"),
3836 Event::PaymentPathSuccessful { .. } => {},
3837 _ => panic!("Unexpected event"),
3840 Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3841 assert_eq!(payment_hash, payment_hash_5);
3842 assert!(payment_failed_permanently);
3844 _ => panic!("Unexpected event"),
3847 Event::PaymentFailed { payment_hash, .. } => {
3848 assert_eq!(payment_hash, payment_hash_5);
3850 _ => panic!("Unexpected event"),
3853 check_added_monitors(&nodes[0], 1);
3855 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3856 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3859 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3860 // Test that we can reconnect when in-flight HTLC updates get dropped
3861 let chanmon_cfgs = create_chanmon_cfgs(2);
3862 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3863 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3864 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3866 let mut as_channel_ready = None;
3867 let channel_id = if messages_delivered == 0 {
3868 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3869 as_channel_ready = Some(channel_ready);
3870 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3871 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3872 // it before the channel_reestablish message.
	chan_id
} else {
	create_announced_chan_between_nodes(&nodes, 0, 1).2
};
3878 let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3880 let payment_event = {
3881 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3882 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3883 check_added_monitors!(nodes[0], 1);
3885 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3886 assert_eq!(events.len(), 1);
3887 SendEvent::from_event(events.remove(0))
3889 assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3891 if messages_delivered < 2 {
3892 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
} else {
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3895 if messages_delivered >= 3 {
3896 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3897 check_added_monitors!(nodes[1], 1);
3898 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3900 if messages_delivered >= 4 {
3901 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3902 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3903 check_added_monitors!(nodes[0], 1);
3905 if messages_delivered >= 5 {
3906 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3907 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3908 // No commitment_signed so get_event_msg's assert(len == 1) passes
3909 check_added_monitors!(nodes[0], 1);
3911 if messages_delivered >= 6 {
3912 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3913 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3914 check_added_monitors!(nodes[1], 1);
3921 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3922 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3923 if messages_delivered < 3 {
3924 if simulate_broken_lnd {
3925 // lnd has a long-standing bug where they send a channel_ready prior to a
3926 // channel_reestablish if you reconnect prior to channel_ready time.
3928 // Here we simulate that behavior, delivering a channel_ready immediately on
3929 // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3930 // in `reconnect_nodes` but we currently don't fail based on that.
3932 // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3933 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3935 // Even if the channel_ready messages get exchanged, as long as nothing further was
3936 // received on either side, both sides will need to resend them.
3937 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3938 reconnect_args.send_channel_ready = (true, true);
3939 reconnect_args.pending_htlc_adds.1 = 1;
3940 reconnect_nodes(reconnect_args);
3941 } else if messages_delivered == 3 {
3942 // nodes[0] still wants its RAA + commitment_signed
3943 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3944 reconnect_args.pending_responding_commitment_signed.0 = true;
3945 reconnect_args.pending_raa.0 = true;
3946 reconnect_nodes(reconnect_args);
3947 } else if messages_delivered == 4 {
3948 // nodes[0] still wants its commitment_signed
3949 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3950 reconnect_args.pending_responding_commitment_signed.0 = true;
3951 reconnect_nodes(reconnect_args);
3952 } else if messages_delivered == 5 {
3953 // nodes[1] still wants its final RAA
3954 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3955 reconnect_args.pending_raa.1 = true;
3956 reconnect_nodes(reconnect_args);
3957 } else if messages_delivered == 6 {
3958 // Everything was delivered...
3959 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
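// Hedged summary of the staging above (illustrative, descriptive strings only): a single
// HTLC add involves five messages, and `messages_delivered` selects how far the exchange
// got before the disconnect.
#[allow(dead_code)]
fn sketch_pending_after_disconnect(messages_delivered: u8) -> &'static str {
	match messages_delivered {
		0..=2 => "nodes[1] needs the update_add_htlc/commitment_signed re-sent",
		3 => "nodes[0] still needs nodes[1]'s revoke_and_ack + commitment_signed",
		4 => "nodes[0] still needs nodes[1]'s commitment_signed",
		5 => "nodes[1] still needs nodes[0]'s final revoke_and_ack",
		_ => "everything was delivered; plain reconnect",
	}
}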
3962 let events_1 = nodes[1].node.get_and_clear_pending_events();
3963 if messages_delivered == 0 {
3964 assert_eq!(events_1.len(), 2);
3966 Event::ChannelReady { .. } => { },
3967 _ => panic!("Unexpected event"),
3970 Event::PendingHTLCsForwardable { .. } => { },
3971 _ => panic!("Unexpected event"),
3974 assert_eq!(events_1.len(), 1);
3976 Event::PendingHTLCsForwardable { .. } => { },
3977 _ => panic!("Unexpected event"),
3981 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3982 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3983 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3985 nodes[1].node.process_pending_htlc_forwards();
3987 let events_2 = nodes[1].node.get_and_clear_pending_events();
3988 assert_eq!(events_2.len(), 1);
3990 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3991 assert_eq!(payment_hash_1, *payment_hash);
3992 assert_eq!(amount_msat, 1_000_000);
3993 assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3994 assert_eq!(via_channel_id, Some(channel_id));
3996 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
3997 assert!(payment_preimage.is_none());
3998 assert_eq!(payment_secret_1, *payment_secret);
4000 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4003 _ => panic!("Unexpected event"),
4006 nodes[1].node.claim_funds(payment_preimage_1);
4007 check_added_monitors!(nodes[1], 1);
4008 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4010 let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
4011 assert_eq!(events_3.len(), 1);
4012 let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
4013 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
4014 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4015 assert!(updates.update_add_htlcs.is_empty());
4016 assert!(updates.update_fail_htlcs.is_empty());
4017 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4018 assert!(updates.update_fail_malformed_htlcs.is_empty());
4019 assert!(updates.update_fee.is_none());
4020 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
4022 _ => panic!("Unexpected event"),
4025 if messages_delivered >= 1 {
4026 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
4028 let events_4 = nodes[0].node.get_and_clear_pending_events();
4029 assert_eq!(events_4.len(), 1);
4031 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4032 assert_eq!(payment_preimage_1, *payment_preimage);
4033 assert_eq!(payment_hash_1, *payment_hash);
4035 _ => panic!("Unexpected event"),
4038 if messages_delivered >= 2 {
4039 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
4040 check_added_monitors!(nodes[0], 1);
4041 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4043 if messages_delivered >= 3 {
4044 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4045 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4046 check_added_monitors!(nodes[1], 1);
4048 if messages_delivered >= 4 {
4049 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
4050 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4051 // No commitment_signed so get_event_msg's assert(len == 1) passes
4052 check_added_monitors!(nodes[1], 1);
4054 if messages_delivered >= 5 {
4055 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4056 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4057 check_added_monitors!(nodes[0], 1);
4064 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4065 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4066 if messages_delivered < 2 {
4067 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4068 reconnect_args.pending_htlc_claims.0 = 1;
4069 reconnect_nodes(reconnect_args);
4070 if messages_delivered < 1 {
4071 expect_payment_sent!(nodes[0], payment_preimage_1);
4073 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4075 } else if messages_delivered == 2 {
4076 // nodes[0] still wants its RAA + commitment_signed
4077 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4078 reconnect_args.pending_responding_commitment_signed.1 = true;
4079 reconnect_args.pending_raa.1 = true;
4080 reconnect_nodes(reconnect_args);
4081 } else if messages_delivered == 3 {
4082 // nodes[0] still wants its commitment_signed
4083 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4084 reconnect_args.pending_responding_commitment_signed.1 = true;
4085 reconnect_nodes(reconnect_args);
4086 } else if messages_delivered == 4 {
4087 // nodes[1] still wants its final RAA
4088 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4089 reconnect_args.pending_raa.0 = true;
4090 reconnect_nodes(reconnect_args);
4091 } else if messages_delivered == 5 {
4092 // Everything was delivered...
4093 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4096 if messages_delivered == 1 || messages_delivered == 2 {
4097 expect_payment_path_successful!(nodes[0]);
4099 if messages_delivered <= 5 {
4100 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4101 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4103 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4105 if messages_delivered > 2 {
4106 expect_payment_path_successful!(nodes[0]);
4109 // Channel should still work fine...
4110 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4111 let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
4112 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4116 fn test_drop_messages_peer_disconnect_a() {
4117 do_test_drop_messages_peer_disconnect(0, true);
4118 do_test_drop_messages_peer_disconnect(0, false);
4119 do_test_drop_messages_peer_disconnect(1, false);
4120 do_test_drop_messages_peer_disconnect(2, false);
4124 fn test_drop_messages_peer_disconnect_b() {
4125 do_test_drop_messages_peer_disconnect(3, false);
4126 do_test_drop_messages_peer_disconnect(4, false);
4127 do_test_drop_messages_peer_disconnect(5, false);
4128 do_test_drop_messages_peer_disconnect(6, false);
4132 fn test_channel_ready_without_best_block_updated() {
// Previously, if we were offline when a funding transaction was locked in, and then came
// back online and called best_block_updated once followed by transactions_confirmed, we'd not
// generate a channel_ready until a later best_block_updated. This tests that we generate the
// channel_ready immediately instead.
4137 let chanmon_cfgs = create_chanmon_cfgs(2);
4138 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4139 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4140 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4141 *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
4143 let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4145 let conf_height = nodes[0].best_block_info().1 + 1;
4146 connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4147 let block_txn = [funding_tx];
4148 let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
4149 let conf_block_header = nodes[0].get_block_header(conf_height);
4150 nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
4152 // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4153 let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4154 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
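// Hedged sketch of the readiness arithmetic this test leans on (illustrative): a funding
// transaction confirmed at `conf_height` has best_height - conf_height + 1 confirmations,
// and channel_ready can go out once that count reaches the required depth, regardless of
// whether the blocks arrived via best_block_updated or transactions_confirmed.
#[allow(dead_code)]
fn sketch_funding_confirmations(best_height: u32, conf_height: u32) -> u32 {
	best_height.saturating_sub(conf_height) + 1
}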
4158 fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
4159 let chanmon_cfgs = create_chanmon_cfgs(2);
4160 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4161 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4162 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4164 // Let channel_manager get ahead of chain_monitor by 1 block.
// This emulates a race condition where a newly added channel_monitor skips processing one block,
// in the case where the client calls block_connected on channel_manager first and then on chain_monitor.
4167 let height_1 = nodes[0].best_block_info().1 + 1;
4168 let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4170 nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4171 nodes[0].node.block_connected(&block_1, height_1);
4173 // Create channel, and it gets added to chain_monitor in funding_created.
4174 let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
// Now, the newly added channel_monitor in chain_monitor hasn't processed block_1,
// but its best_block is block_1, since that was populated by channel_manager, and channel_manager
// was running ahead of chain_monitor at the time of funding_created.
// Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
// Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4181 confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4182 connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4184 // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4185 let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4186 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4190 fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
4191 let chanmon_cfgs = create_chanmon_cfgs(2);
4192 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4193 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4194 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4196 // Let chain_monitor get ahead of channel_manager by 1 block.
// This emulates a race condition where a newly added channel_monitor skips processing one block,
// in the case where the client calls block_connected on chain_monitor first and then on channel_manager.
4199 let height_1 = nodes[0].best_block_info().1 + 1;
4200 let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4202 nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4203 nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
4205 // Create channel, and it gets added to chain_monitor in funding_created.
4206 let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
// channel_manager can't really skip block_1; it should get it eventually.
4209 nodes[0].node.block_connected(&block_1, height_1);
// Now, the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block is
// the block before block_1, since that was populated by channel_manager, and channel_manager was
// running behind at the time of funding_created.
// Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
// Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4216 confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4217 connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4219 // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4220 let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4221 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
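// Hedged note covering both chain-sync tests above (illustrative): a monitor added with
// best block B must be able to resume from B without having processed B itself; the first
// block it actually needs to see is the one at height B + 1.
#[allow(dead_code)]
fn sketch_first_height_monitor_needs(monitor_best_block_height: u32) -> u32 {
	monitor_best_block_height + 1
}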
4225 fn test_drop_messages_peer_disconnect_dual_htlc() {
4226 // Test that we can handle reconnecting when both sides of a channel have pending
4227 // commitment_updates when we disconnect.
4228 let chanmon_cfgs = create_chanmon_cfgs(2);
4229 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4230 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4231 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4232 create_announced_chan_between_nodes(&nodes, 0, 1);
4234 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4236 // Now try to send a second payment which will fail to send
4237 let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4238 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4239 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4240 check_added_monitors!(nodes[0], 1);
4242 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4243 assert_eq!(events_1.len(), 1);
4245 MessageSendEvent::UpdateHTLCs { .. } => {},
4246 _ => panic!("Unexpected event"),
4249 nodes[1].node.claim_funds(payment_preimage_1);
4250 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4251 check_added_monitors!(nodes[1], 1);
4253 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4254 assert_eq!(events_2.len(), 1);
4256 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4257 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4258 assert!(update_add_htlcs.is_empty());
4259 assert_eq!(update_fulfill_htlcs.len(), 1);
4260 assert!(update_fail_htlcs.is_empty());
4261 assert!(update_fail_malformed_htlcs.is_empty());
4262 assert!(update_fee.is_none());
4264 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4265 let events_3 = nodes[0].node.get_and_clear_pending_events();
4266 assert_eq!(events_3.len(), 1);
4268 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4269 assert_eq!(*payment_preimage, payment_preimage_1);
4270 assert_eq!(*payment_hash, payment_hash_1);
4272 _ => panic!("Unexpected event"),
4275 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4276 let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4277 // No commitment_signed so get_event_msg's assert(len == 1) passes
4278 check_added_monitors!(nodes[0], 1);
4280 _ => panic!("Unexpected event"),
4283 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4284 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4286 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4287 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4289 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4290 assert_eq!(reestablish_1.len(), 1);
4291 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4292 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4294 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4295 assert_eq!(reestablish_2.len(), 1);
4297 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4298 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4299 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4300 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4302 assert!(as_resp.0.is_none());
4303 assert!(bs_resp.0.is_none());
4305 assert!(bs_resp.1.is_none());
4306 assert!(bs_resp.2.is_none());
4308 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4310 assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4311 assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4312 assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4313 assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4314 assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4315 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4316 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4317 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4318 // No commitment_signed so get_event_msg's assert(len == 1) passes
4319 check_added_monitors!(nodes[1], 1);
4321 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4322 let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4323 assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4324 assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4325 assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4326 assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4327 assert!(bs_second_commitment_signed.update_fee.is_none());
4328 check_added_monitors!(nodes[1], 1);
4330 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4331 let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4332 assert!(as_commitment_signed.update_add_htlcs.is_empty());
4333 assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4334 assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4335 assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4336 assert!(as_commitment_signed.update_fee.is_none());
4337 check_added_monitors!(nodes[0], 1);
4339 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4340 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4341 // No commitment_signed so get_event_msg's assert(len == 1) passes
4342 check_added_monitors!(nodes[0], 1);
4344 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4345 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4346 // No commitment_signed so get_event_msg's assert(len == 1) passes
4347 check_added_monitors!(nodes[1], 1);
4349 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4350 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4351 check_added_monitors!(nodes[1], 1);
4353 expect_pending_htlcs_forwardable!(nodes[1]);
4355 let events_5 = nodes[1].node.get_and_clear_pending_events();
4356 assert_eq!(events_5.len(), 1);
4358 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4359 assert_eq!(payment_hash_2, *payment_hash);
4361 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
4362 assert!(payment_preimage.is_none());
4363 assert_eq!(payment_secret_2, *payment_secret);
4365 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4368 _ => panic!("Unexpected event"),
4371 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4372 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4373 check_added_monitors!(nodes[0], 1);
4375 expect_payment_path_successful!(nodes[0]);
4376 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4379 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4380 // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4381 // to avoid our counterparty failing the channel.
4382 let chanmon_cfgs = create_chanmon_cfgs(2);
4383 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4384 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4385 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4387 create_announced_chan_between_nodes(&nodes, 0, 1);
4389 let our_payment_hash = if send_partial_mpp {
4390 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4391 // Use the utility function send_payment_along_path to send the payment with MPP data which
4392 // indicates there are more HTLCs coming.
4393 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4394 let payment_id = PaymentId([42; 32]);
4395 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4396 RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4397 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4398 RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4399 &None, session_privs[0]).unwrap();
4400 check_added_monitors!(nodes[0], 1);
4401 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4402 assert_eq!(events.len(), 1);
4403 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4404 // hop should *not* yet generate any PaymentClaimable event(s).
4405 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4408 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4411 let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4412 connect_block(&nodes[0], &block);
4413 connect_block(&nodes[1], &block);
4414 let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4415 for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4416 block.header.prev_blockhash = block.block_hash();
4417 connect_block(&nodes[0], &block);
4418 connect_block(&nodes[1], &block);
4421 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4423 check_added_monitors!(nodes[1], 1);
4424 let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4425 assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4426 assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4427 assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4428 assert!(htlc_timeout_updates.update_fee.is_none());
4430 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4431 commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4432 // 100_000 msat as u64, followed by the height at which we failed back above
4433 let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4434 expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
4435 expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
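// Sanity-check the BOLT 4 failure-data layout asserted above: for failure
// code 0x4000 | 15 (PERM | incorrect_or_unknown_payment_details) the data is
// the HTLC amount as a big-endian u64 followed by the failing node's block
// height as a big-endian u32, i.e. 12 bytes in total.
assert_eq!(expected_failure_data.len(), 8 + 4);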
4439 fn test_htlc_timeout() {
4440 do_test_htlc_timeout(true);
4441 do_test_htlc_timeout(false);
4444 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4445 // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4446 let chanmon_cfgs = create_chanmon_cfgs(3);
4447 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4448 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4449 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4450 create_announced_chan_between_nodes(&nodes, 0, 1);
4451 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4453 // Make sure all nodes are at the same starting height
4454 connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4455 connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4456 connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4458 // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4459 let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4460 nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4461 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4462 assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4463 check_added_monitors!(nodes[1], 1);
4465 // Now attempt to route a second payment, which should be placed in the holding cell
4466 let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4467 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4468 sending_node.node.send_payment_with_route(&route, second_payment_hash,
4469 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4470 if forwarded_htlc {
4471 check_added_monitors!(nodes[0], 1);
4472 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4473 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4474 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4475 expect_pending_htlcs_forwardable!(nodes[1]);
4476 }
4477 check_added_monitors!(nodes[1], 0);
4479 connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4480 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4481 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4482 connect_blocks(&nodes[1], 1);
4484 if forwarded_htlc {
4485 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4486 check_added_monitors!(nodes[1], 1);
4487 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4488 assert_eq!(fail_commit.len(), 1);
4489 match fail_commit[0] {
4490 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4491 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4492 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4493 },
4494 _ => unreachable!(),
4495 }
4496 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4497 } else {
4498 expect_payment_failed!(nodes[1], second_payment_hash, false);
4499 }
4503 fn test_holding_cell_htlc_add_timeouts() {
4504 do_test_holding_cell_htlc_add_timeouts(false);
4505 do_test_holding_cell_htlc_add_timeouts(true);
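// A helper used by the spendable-output tests below: it drains the node's
// pending `SpendableOutputs` events, spends each returned descriptor to an
// OP_RETURN script at the minimum feerate of 253 sat/kW, and returns the
// resulting transactions. When more than one descriptor was produced it also
// attempts a single aggregated spend of all of them, so callers may see one
// extra "all outputs" transaction.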
4508 macro_rules! check_spendable_outputs {
4509 ($node: expr, $keysinterface: expr) => {
4511 let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4512 let mut txn = Vec::new();
4513 let mut all_outputs = Vec::new();
4514 let secp_ctx = Secp256k1::new();
4515 for event in events.drain(..) {
4516 match event {
4517 Event::SpendableOutputs { mut outputs, channel_id: _ } => {
4518 for outp in outputs.drain(..) {
4519 txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4520 all_outputs.push(outp);
4521 }
4522 },
4523 _ => panic!("Unexpected event"),
4524 }
4525 }
4526 if all_outputs.len() > 1 {
4527 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4528 txn.push(tx);
4529 }
4530 }
4531 txn
4532 }
4533 }
4534 }
4537 fn test_claim_sizeable_push_msat() {
4538 // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
4539 let chanmon_cfgs = create_chanmon_cfgs(2);
4540 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4541 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4542 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4544 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4545 nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4546 check_closed_broadcast!(nodes[1], true);
4547 check_added_monitors!(nodes[1], 1);
4548 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
4549 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4550 assert_eq!(node_txn.len(), 1);
4551 check_spends!(node_txn[0], chan.3);
4552 assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4554 mine_transaction(&nodes[1], &node_txn[0]);
4555 connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4557 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4558 assert_eq!(spend_txn.len(), 1);
4559 assert_eq!(spend_txn[0].input.len(), 1);
4560 check_spends!(spend_txn[0], node_txn[0]);
4561 assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
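// Note the symmetry above: to_local is encumbered by an OP_CSV delay of the
// counterparty-selected to_self_delay (BREAKDOWN_TIMEOUT in these tests), so
// the output only becomes claimable BREAKDOWN_TIMEOUT confirmations after the
// commitment tx, and the sweep input must set nSequence to exactly that delay.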
4565 fn test_claim_on_remote_sizeable_push_msat() {
4566 // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4567 // to_remote output is encumbered by a P2WPKH
4568 let chanmon_cfgs = create_chanmon_cfgs(2);
4569 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4570 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4571 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4573 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4574 nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4575 check_closed_broadcast!(nodes[0], true);
4576 check_added_monitors!(nodes[0], 1);
4577 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
4579 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4580 assert_eq!(node_txn.len(), 1);
4581 check_spends!(node_txn[0], chan.3);
4582 assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4584 mine_transaction(&nodes[1], &node_txn[0]);
4585 check_closed_broadcast!(nodes[1], true);
4586 check_added_monitors!(nodes[1], 1);
4587 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4588 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4590 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4591 assert_eq!(spend_txn.len(), 1);
4592 check_spends!(spend_txn[0], node_txn[0]);
4596 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4597 // Same test as the previous one, but on the remote *revoked* commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4598 // to_remote output is encumbered by a P2WPKH
4600 let chanmon_cfgs = create_chanmon_cfgs(2);
4601 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4602 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4603 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4605 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4606 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4607 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4608 assert_eq!(revoked_local_txn[0].input.len(), 1);
4609 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4611 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4612 mine_transaction(&nodes[1], &revoked_local_txn[0]);
4613 check_closed_broadcast!(nodes[1], true);
4614 check_added_monitors!(nodes[1], 1);
4615 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4617 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4618 mine_transaction(&nodes[1], &node_txn[0]);
4619 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4621 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4622 assert_eq!(spend_txn.len(), 3);
4623 check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4624 check_spends!(spend_txn[1], node_txn[0]);
4625 check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4629 fn test_static_spendable_outputs_preimage_tx() {
4630 let chanmon_cfgs = create_chanmon_cfgs(2);
4631 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4632 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4633 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4635 // Create some initial channels
4636 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4638 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4640 let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4641 assert_eq!(commitment_tx[0].input.len(), 1);
4642 assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4644 // Settle A's commitment tx on B's chain
4645 nodes[1].node.claim_funds(payment_preimage);
4646 expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4647 check_added_monitors!(nodes[1], 1);
4648 mine_transaction(&nodes[1], &commitment_tx[0]);
4649 check_added_monitors!(nodes[1], 1);
4650 let events = nodes[1].node.get_and_clear_pending_msg_events();
4651 match events[0] {
4652 MessageSendEvent::UpdateHTLCs { .. } => {},
4653 _ => panic!("Unexpected event"),
4654 }
4655 match events[1] {
4656 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4657 _ => panic!("Unexpected event"),
4658 }
4660 // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4661 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4662 assert_eq!(node_txn.len(), 1);
4663 check_spends!(node_txn[0], commitment_tx[0]);
4664 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
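// The last witness element of an HTLC claim is the full HTLC redeem script,
// so these tests identify which script is being spent by matching its length:
// OFFERED_HTLC_SCRIPT_WEIGHT here, since B is claiming an HTLC that A offered
// on A's commitment tx.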
4666 mine_transaction(&nodes[1], &node_txn[0]);
4667 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4668 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4670 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4671 assert_eq!(spend_txn.len(), 1);
4672 check_spends!(spend_txn[0], node_txn[0]);
4676 fn test_static_spendable_outputs_timeout_tx() {
4677 let chanmon_cfgs = create_chanmon_cfgs(2);
4678 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4679 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4680 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4682 // Create some initial channels
4683 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4685 // Rebalance the network a bit by relaying one payment through all the channels ...
4686 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4688 let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4690 let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4691 assert_eq!(commitment_tx[0].input.len(), 1);
4692 assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4694 // Settle A's commitment tx on B's chain
4695 mine_transaction(&nodes[1], &commitment_tx[0]);
4696 check_added_monitors!(nodes[1], 1);
4697 let events = nodes[1].node.get_and_clear_pending_msg_events();
4698 match events[0] {
4699 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4700 _ => panic!("Unexpected event"),
4701 }
4702 connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4704 // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4705 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4706 assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4707 check_spends!(node_txn[0], commitment_tx[0].clone());
4708 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4710 mine_transaction(&nodes[1], &node_txn[0]);
4711 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4712 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4713 expect_payment_failed!(nodes[1], our_payment_hash, false);
4715 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4716 assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output, and an aggregated spend of both
4717 check_spends!(spend_txn[0], commitment_tx[0]);
4718 check_spends!(spend_txn[1], node_txn[0]);
4719 check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
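// Three spends show up because check_spendable_outputs! builds one
// transaction per descriptor (the to_remote output and the timeout tx output)
// plus, since more than one descriptor was pending, a single aggregated spend
// of both.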
4723 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4724 let chanmon_cfgs = create_chanmon_cfgs(2);
4725 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4726 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4727 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4729 // Create some initial channels
4730 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4732 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4733 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4734 assert_eq!(revoked_local_txn[0].input.len(), 1);
4735 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4737 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4739 mine_transaction(&nodes[1], &revoked_local_txn[0]);
4740 check_closed_broadcast!(nodes[1], true);
4741 check_added_monitors!(nodes[1], 1);
4742 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4744 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4745 assert_eq!(node_txn.len(), 1);
4746 assert_eq!(node_txn[0].input.len(), 2);
4747 check_spends!(node_txn[0], revoked_local_txn[0]);
4749 mine_transaction(&nodes[1], &node_txn[0]);
4750 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4752 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4753 assert_eq!(spend_txn.len(), 1);
4754 check_spends!(spend_txn[0], node_txn[0]);
4758 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4759 let mut chanmon_cfgs = create_chanmon_cfgs(2);
4760 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4761 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4762 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4763 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4765 // Create some initial channels
4766 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4768 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4769 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4770 assert_eq!(revoked_local_txn[0].input.len(), 1);
4771 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4773 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4775 // A will generate HTLC-Timeout from revoked commitment tx
4776 mine_transaction(&nodes[0], &revoked_local_txn[0]);
4777 check_closed_broadcast!(nodes[0], true);
4778 check_added_monitors!(nodes[0], 1);
4779 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4780 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4782 let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4783 assert_eq!(revoked_htlc_txn.len(), 1);
4784 assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4785 assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4786 check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4787 assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout
4789 // B will generate justice tx from A's revoked commitment/HTLC tx
4790 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4791 check_closed_broadcast!(nodes[1], true);
4792 check_added_monitors!(nodes[1], 1);
4793 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4795 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4796 assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4797 // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4798 // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4799 // transactions next...
4800 assert_eq!(node_txn[0].input.len(), 3);
4801 check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4803 assert_eq!(node_txn[1].input.len(), 2);
4804 check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4805 if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4806 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4807 } else {
4808 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4809 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4810 }
4812 mine_transaction(&nodes[1], &node_txn[1]);
4813 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4815 // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4816 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4817 assert_eq!(spend_txn.len(), 1);
4818 assert_eq!(spend_txn[0].input.len(), 1);
4819 check_spends!(spend_txn[0], node_txn[1]);
4823 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4824 let mut chanmon_cfgs = create_chanmon_cfgs(2);
4825 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4826 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4827 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4828 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4830 // Create some initial channels
4831 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4833 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4834 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4835 assert_eq!(revoked_local_txn[0].input.len(), 1);
4836 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4838 // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4839 assert_eq!(revoked_local_txn[0].output.len(), 2);
4841 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4843 // B will generate HTLC-Success from revoked commitment tx
4844 mine_transaction(&nodes[1], &revoked_local_txn[0]);
4845 check_closed_broadcast!(nodes[1], true);
4846 check_added_monitors!(nodes[1], 1);
4847 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4848 let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4850 assert_eq!(revoked_htlc_txn.len(), 1);
4851 assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4852 assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4853 check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4855 // Check that the unspent output (of the two) on revoked_local_txn[0] is a P2WPKH:
4856 let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
4857 assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
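// The 2 + 20 above is the v0 P2WPKH layout: OP_0 plus a single 20-byte
// key-hash push. The same check, stated directly:
assert!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.is_v0_p2wpkh());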
4859 // A will generate justice tx from B's revoked commitment/HTLC tx
4860 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4861 check_closed_broadcast!(nodes[0], true);
4862 check_added_monitors!(nodes[0], 1);
4863 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4865 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4866 assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4868 // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4869 // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4870 // transactions next...
4871 assert_eq!(node_txn[0].input.len(), 2);
4872 check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4873 if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4874 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4875 } else {
4876 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4877 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4878 }
4880 assert_eq!(node_txn[1].input.len(), 1);
4881 check_spends!(node_txn[1], revoked_htlc_txn[0]);
4883 mine_transaction(&nodes[0], &node_txn[1]);
4884 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4886 // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
4887 // didn't try to generate any new transactions.
4889 // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4890 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4891 assert_eq!(spend_txn.len(), 3);
4892 assert_eq!(spend_txn[0].input.len(), 1);
4893 check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4894 assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4895 check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4896 check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4900 fn test_onchain_to_onchain_claim() {
4901 // Test that in case of channel closure, we detect the state of output and claim HTLC
4902 // on downstream peer's remote commitment tx.
4903 // First, have C claim an HTLC against its own latest commitment transaction.
4904 // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4905 // channel.
4906 // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4907 // is broadcast.
4909 let chanmon_cfgs = create_chanmon_cfgs(3);
4910 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4911 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4912 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4914 // Create some initial channels
4915 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4916 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4918 // Ensure all nodes are at the same height
4919 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4920 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4921 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4922 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4924 // Rebalance the network a bit by relaying one payment through all the channels ...
4925 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4926 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4928 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4929 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4930 check_spends!(commitment_tx[0], chan_2.3);
4931 nodes[2].node.claim_funds(payment_preimage);
4932 expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4933 check_added_monitors!(nodes[2], 1);
4934 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4935 assert!(updates.update_add_htlcs.is_empty());
4936 assert!(updates.update_fail_htlcs.is_empty());
4937 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4938 assert!(updates.update_fail_malformed_htlcs.is_empty());
4940 mine_transaction(&nodes[2], &commitment_tx[0]);
4941 check_closed_broadcast!(nodes[2], true);
4942 check_added_monitors!(nodes[2], 1);
4943 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4945 let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4946 assert_eq!(c_txn.len(), 1);
4947 check_spends!(c_txn[0], commitment_tx[0]);
4948 assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4949 assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
4950 assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx
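// HTLC-Success spends of a node's own commitment carry no locktime
// (LockTime::ZERO here), whereas HTLC-Timeout spends must set nLockTime to
// the HTLC's cltv_expiry; claims against a remote commitment (seen later in
// this test) instead set nLockTime to the current height, presumably to
// discourage fee sniping. The lock_time assertions in these tests are keyed
// off those distinctions.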
4952 // Broadcast C's commitment tx and HTLC-Success tx on B's chain; B should be able to extract the preimage and update its downstream monitor
4953 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4954 check_added_monitors!(nodes[1], 1);
4955 let events = nodes[1].node.get_and_clear_pending_events();
4956 assert_eq!(events.len(), 2);
4957 match events[0] {
4958 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4959 _ => panic!("Unexpected event"),
4960 }
4961 match events[1] {
4962 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
4963 next_channel_id, outbound_amount_forwarded_msat, ..
4964 } => {
4965 assert_eq!(total_fee_earned_msat, Some(1000));
4966 assert_eq!(prev_channel_id, Some(chan_1.2));
4967 assert_eq!(claim_from_onchain_tx, true);
4968 assert_eq!(next_channel_id, Some(chan_2.2));
4969 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4970 },
4971 _ => panic!("Unexpected event"),
4972 }
4973 check_added_monitors!(nodes[1], 1);
4974 let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4975 assert_eq!(msg_events.len(), 3);
4976 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4977 let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4979 match nodes_2_event {
4980 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
4981 _ => panic!("Unexpected event"),
4982 }
4984 match nodes_0_event {
4985 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4986 assert!(update_add_htlcs.is_empty());
4987 assert!(update_fail_htlcs.is_empty());
4988 assert_eq!(update_fulfill_htlcs.len(), 1);
4989 assert!(update_fail_malformed_htlcs.is_empty());
4990 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4991 },
4992 _ => panic!("Unexpected event"),
4993 }
4995 // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4996 match msg_events[0] {
4997 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4998 _ => panic!("Unexpected event"),
4999 }
5001 // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
5002 let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5003 mine_transaction(&nodes[1], &commitment_tx[0]);
5004 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5005 let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5006 // ChannelMonitor: HTLC-Success tx
5007 assert_eq!(b_txn.len(), 1);
5008 check_spends!(b_txn[0], commitment_tx[0]);
5009 assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5010 assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
5011 assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
5013 check_closed_broadcast!(nodes[1], true);
5014 check_added_monitors!(nodes[1], 1);
5018 fn test_duplicate_payment_hash_one_failure_one_success() {
5019 // Topology : A --> B --> C --> D
5020 // We route two payments with the same hash between B and C; one will be timed out, the other successfully claimed
5021 // Note that because C will refuse to generate two payment secrets for the same payment hash,
5022 // we forward one of the payments onwards to D.
5023 let chanmon_cfgs = create_chanmon_cfgs(4);
5024 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
5025 // When this test was written, the default base fee floated based on the HTLC count.
5026 // It is now fixed, so we simply set the fee to the expected value here.
5027 let mut config = test_default_channel_config();
5028 config.channel_config.forwarding_fee_base_msat = 196;
5029 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
5030 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5031 let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
5033 create_announced_chan_between_nodes(&nodes, 0, 1);
5034 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5035 create_announced_chan_between_nodes(&nodes, 2, 3);
5037 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5038 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5039 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5040 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5041 connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
5043 let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
5045 let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
5046 // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
5047 // script push size limit so that the below script length checks match
5048 // ACCEPTED_HTLC_SCRIPT_WEIGHT.
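// (The accepted-HTLC witness script pushes the cltv_expiry as a minimally
// encoded script number; letting the expiry grow past the one-byte boundary
// would lengthen the script, and the witness-length comparisons against
// ACCEPTED_HTLC_SCRIPT_WEIGHT below would no longer line up.)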
5049 let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
5050 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
5051 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
5052 send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
5054 let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
5055 assert_eq!(commitment_txn[0].input.len(), 1);
5056 check_spends!(commitment_txn[0], chan_2.3);
5058 mine_transaction(&nodes[1], &commitment_txn[0]);
5059 check_closed_broadcast!(nodes[1], true);
5060 check_added_monitors!(nodes[1], 1);
5061 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
5062 connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
5064 let htlc_timeout_tx;
5065 { // Extract one of the two HTLC-Timeout transactions
5066 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5067 // ChannelMonitor: timeout tx * 2-or-3
5068 assert!(node_txn.len() == 2 || node_txn.len() == 3);
5070 check_spends!(node_txn[0], commitment_txn[0]);
5071 assert_eq!(node_txn[0].input.len(), 1);
5072 assert_eq!(node_txn[0].output.len(), 1);
5074 if node_txn.len() > 2 {
5075 check_spends!(node_txn[1], commitment_txn[0]);
5076 assert_eq!(node_txn[1].input.len(), 1);
5077 assert_eq!(node_txn[1].output.len(), 1);
5078 assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5080 check_spends!(node_txn[2], commitment_txn[0]);
5081 assert_eq!(node_txn[2].input.len(), 1);
5082 assert_eq!(node_txn[2].output.len(), 1);
5083 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
5084 } else {
5085 check_spends!(node_txn[1], commitment_txn[0]);
5086 assert_eq!(node_txn[1].input.len(), 1);
5087 assert_eq!(node_txn[1].output.len(), 1);
5088 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5089 }
5091 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5092 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5093 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
5094 // (with value 900 sats) will be claimed in the below `claim_funds` call.
5095 if node_txn.len() > 2 {
5096 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5097 htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
5098 } else {
5099 htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
5100 }
5101 }
5103 nodes[2].node.claim_funds(our_payment_preimage);
5104 expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
5106 mine_transaction(&nodes[2], &commitment_txn[0]);
5107 check_added_monitors!(nodes[2], 2);
5108 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5109 let events = nodes[2].node.get_and_clear_pending_msg_events();
5110 match events[0] {
5111 MessageSendEvent::UpdateHTLCs { .. } => {},
5112 _ => panic!("Unexpected event"),
5113 }
5114 match events[1] {
5115 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5116 _ => panic!("Unexpected event"),
5117 }
5118 let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5119 assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
5120 check_spends!(htlc_success_txn[0], commitment_txn[0]);
5121 check_spends!(htlc_success_txn[1], commitment_txn[0]);
5122 assert_eq!(htlc_success_txn[0].input.len(), 1);
5123 assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5124 assert_eq!(htlc_success_txn[1].input.len(), 1);
5125 assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5126 assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5127 assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
5129 mine_transaction(&nodes[1], &htlc_timeout_tx);
5130 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5131 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
5132 let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5133 assert!(htlc_updates.update_add_htlcs.is_empty());
5134 assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
5135 let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5136 assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5137 assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5138 check_added_monitors!(nodes[1], 1);
5140 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5141 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5143 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5145 expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
5147 // Settle the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5148 mine_transaction(&nodes[1], &htlc_success_txn[1]);
5149 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
5150 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5151 assert!(updates.update_add_htlcs.is_empty());
5152 assert!(updates.update_fail_htlcs.is_empty());
5153 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5154 assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5155 assert!(updates.update_fail_malformed_htlcs.is_empty());
5156 check_added_monitors!(nodes[1], 1);
5158 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5159 commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5160 expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
5164 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5165 let chanmon_cfgs = create_chanmon_cfgs(2);
5166 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5167 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5168 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5170 // Create some initial channels
5171 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5173 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
5174 let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5175 assert_eq!(local_txn.len(), 1);
5176 assert_eq!(local_txn[0].input.len(), 1);
5177 check_spends!(local_txn[0], chan_1.3);
5179 // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
5180 nodes[1].node.claim_funds(payment_preimage);
5181 expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
5182 check_added_monitors!(nodes[1], 1);
5184 mine_transaction(&nodes[1], &local_txn[0]);
5185 check_added_monitors!(nodes[1], 1);
5186 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5187 let events = nodes[1].node.get_and_clear_pending_msg_events();
5188 match events[0] {
5189 MessageSendEvent::UpdateHTLCs { .. } => {},
5190 _ => panic!("Unexpected event"),
5191 }
5192 match events[1] {
5193 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5194 _ => panic!("Unexpected event"),
5195 }
5196 let node_tx = {
5197 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5198 assert_eq!(node_txn.len(), 1);
5199 assert_eq!(node_txn[0].input.len(), 1);
5200 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5201 check_spends!(node_txn[0], local_txn[0]);
5202 node_txn[0].clone()
5203 };
5205 mine_transaction(&nodes[1], &node_tx);
5206 connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
5208 // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
5209 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5210 assert_eq!(spend_txn.len(), 1);
5211 assert_eq!(spend_txn[0].input.len(), 1);
5212 check_spends!(spend_txn[0], node_tx);
5213 assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5216 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5217 // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5218 // unrevoked commitment transaction.
5219 // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5220 // a remote RAA before they could be failed backwards (and combinations thereof).
5221 // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5222 // use the same payment hashes.
5223 // Thus, we use a six-node network:
5224 //
5225 // A \         / E
5226 //    - C - D -
5227 // B /         \ F
5228 // And test where C fails back to A/B when D announces its latest commitment transaction
5229 let chanmon_cfgs = create_chanmon_cfgs(6);
5230 let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5231 // When this test was written, the default base fee floated based on the HTLC count.
5232 // It is now fixed, so we simply set the fee to the expected value here.
5233 let mut config = test_default_channel_config();
5234 config.channel_config.forwarding_fee_base_msat = 196;
5235 let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5236 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5237 let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5239 let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5240 let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5241 let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5242 let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5243 let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5);
5245 // Rebalance and check output sanity...
5246 send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5247 send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5248 assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5250 let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5251 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
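// An HTLC only gets its own output in the commitment tx when its value, less
// the fee of its HTLC claim transaction, clears the holder's dust limit; the
// ds_dust_limit * 1000 msat payments below stay under that bar and are burned
// to fees instead, which is what the "not added" notes mean.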
5253 let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5255 let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5256 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5258 send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5260 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5262 let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5264 let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5265 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5267 send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5269 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5272 let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5274 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5275 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5278 let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5280 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5281 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5283 // Double-check that six of the new HTLCs were added
5284 // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5285 // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
5286 assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5287 assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5289 // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5290 // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5291 nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5292 nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5293 nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5294 nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5295 check_added_monitors!(nodes[4], 0);
5297 let failed_destinations = vec![
5298 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5299 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5300 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5301 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5303 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5304 check_added_monitors!(nodes[4], 1);
5306 let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5307 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5308 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5309 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5310 nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5311 commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5313 // Fail 3rd below-dust and 7th above-dust HTLCs
5314 nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5315 nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5316 check_added_monitors!(nodes[5], 0);
5318 let failed_destinations_2 = vec![
5319 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5320 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5322 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5323 check_added_monitors!(nodes[5], 1);
5325 let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5326 nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5327 nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5328 commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5330 let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5332 // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events
5333 let failed_destinations_3 = vec![
5334 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5335 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5336 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5337 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5338 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5339 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5340 ];
5341 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5342 check_added_monitors!(nodes[3], 1);
5343 let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5344 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5345 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5346 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5347 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5348 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5349 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5350 if deliver_last_raa {
5351 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5352 } else {
5353 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5354 }
5356 // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5357 // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5358 // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5359 // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5361 // We now broadcast the latest commitment transaction, which *should* result in failures for
5362 // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5363 // the non-broadcast above-dust HTLCs.
5365 // Alternatively, we may broadcast the previous commitment transaction, which should only
5366 // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5367 let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5369 if announce_latest {
5370 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5371 } else {
5372 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5373 }
5374 let events = nodes[2].node.get_and_clear_pending_events();
5375 let close_event = if deliver_last_raa {
5376 assert_eq!(events.len(), 2 + 6);
5377 events.last().clone().unwrap()
5378 } else {
5379 assert_eq!(events.len(), 1);
5380 events.last().clone().unwrap()
5381 };
5382 match close_event {
5383 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5384 _ => panic!("Unexpected event"),
5385 }
5387 connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5388 check_closed_broadcast!(nodes[2], true);
5389 if deliver_last_raa {
5390 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
5392 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5393 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5394 } else {
5395 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5396 repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5397 } else {
5398 repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5399 };
5401 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5402 }
5403 check_added_monitors!(nodes[2], 3);
5405 let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5406 assert_eq!(cs_msgs.len(), 2);
5407 let mut a_done = false;
5408 for msg in cs_msgs {
5409 match msg {
5410 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5411 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5412 // should be failed-backwards here.
5413 let target = if *node_id == nodes[0].node.get_our_node_id() {
5414 // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5415 for htlc in &updates.update_fail_htlcs {
5416 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5417 }
5418 assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5419 assert!(!a_done);
5420 a_done = true;
5421 &nodes[0]
5422 } else {
5423 // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5424 for htlc in &updates.update_fail_htlcs {
5425 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5426 }
5427 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5428 assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5429 &nodes[1]
5430 };
5431 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5432 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5433 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5434 if announce_latest {
5435 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5436 if *node_id == nodes[0].node.get_our_node_id() {
5437 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5438 }
5439 }
5440 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5441 },
5442 _ => panic!("Unexpected event"),
5443 }
5444 }
5446 let as_events = nodes[0].node.get_and_clear_pending_events();
5447 assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5448 let mut as_faileds = new_hash_set();
5449 let mut as_updates = 0;
5450 for event in as_events.iter() {
5451 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5452 assert!(as_faileds.insert(*payment_hash));
5453 if *payment_hash != payment_hash_2 {
5454 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5455 } else {
5456 assert!(!payment_failed_permanently);
5457 }
5458 if let PathFailure::OnPath { network_update: Some(_) } = failure {
5459 as_updates += 1;
5460 }
5461 } else if let &Event::PaymentFailed { .. } = event {
5462 } else { panic!("Unexpected event"); }
5463 }
5464 assert!(as_faileds.contains(&payment_hash_1));
5465 assert!(as_faileds.contains(&payment_hash_2));
5466 if announce_latest {
5467 assert!(as_faileds.contains(&payment_hash_3));
5468 assert!(as_faileds.contains(&payment_hash_5));
5469 }
5470 assert!(as_faileds.contains(&payment_hash_6));
5472 let bs_events = nodes[1].node.get_and_clear_pending_events();
5473 assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5474 let mut bs_faileds = new_hash_set();
5475 let mut bs_updates = 0;
5476 for event in bs_events.iter() {
5477 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5478 assert!(bs_faileds.insert(*payment_hash));
5479 if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5480 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5482 assert!(!payment_failed_permanently);
5484 if let PathFailure::OnPath { network_update: Some(_) } = failure {
5487 } else if let &Event::PaymentFailed { .. } = event {
5488 } else { panic!("Unexpected event"); }
5490 assert!(bs_faileds.contains(&payment_hash_1));
5491 assert!(bs_faileds.contains(&payment_hash_2));
5492 if announce_latest {
5493 assert!(bs_faileds.contains(&payment_hash_4));
5495 assert!(bs_faileds.contains(&payment_hash_5));
5497 // For each HTLC which was not failed-back by the normal process (i.e. deliver_last_raa), we should
5498 // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
5499 // unknown-preimage-etc, B should have gotten 2. Thus, in the
5500 // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
5501 assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5502 assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5506 fn test_fail_backwards_latest_remote_announce_a() {
5507 do_test_fail_backwards_unrevoked_remote_announce(false, true);
5511 fn test_fail_backwards_latest_remote_announce_b() {
5512 do_test_fail_backwards_unrevoked_remote_announce(true, true);
5516 fn test_fail_backwards_previous_remote_announce() {
5517 do_test_fail_backwards_unrevoked_remote_announce(false, false);
5518 // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
5519 // tested for in test_commitment_revoked_fail_backward_exhaustive()
5523 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5524 let chanmon_cfgs = create_chanmon_cfgs(2);
5525 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5526 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5527 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5529 // Create some initial channels
5530 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5532 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5533 let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5534 assert_eq!(local_txn[0].input.len(), 1);
5535 check_spends!(local_txn[0], chan_1.3);
5537 // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5538 mine_transaction(&nodes[0], &local_txn[0]);
5539 check_closed_broadcast!(nodes[0], true);
5540 check_added_monitors!(nodes[0], 1);
5541 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5542 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
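// Once the HTLC has expired on the confirmed commitment, A's ChannelMonitor should broadcast a
// single HTLC-Timeout transaction claiming the offered-HTLC output; that is what we pull out of
// the broadcaster below.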
5544 let htlc_timeout = {
5545 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5546 assert_eq!(node_txn.len(), 1);
5547 assert_eq!(node_txn[0].input.len(), 1);
5548 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5549 check_spends!(node_txn[0], local_txn[0]);
5553 mine_transaction(&nodes[0], &htlc_timeout);
5554 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5555 expect_payment_failed!(nodes[0], our_payment_hash, false);
5557 // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
5558 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
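// We expect three sweeps here (see the assertions below): one spending A's delayed balance output
// on the commitment transaction, one spending the HTLC-Timeout output once its to_self_delay
// (BREAKDOWN_TIMEOUT) has passed, and one aggregate transaction spending both.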
5559 assert_eq!(spend_txn.len(), 3);
5560 check_spends!(spend_txn[0], local_txn[0]);
5561 assert_eq!(spend_txn[1].input.len(), 1);
5562 check_spends!(spend_txn[1], htlc_timeout);
5563 assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5564 assert_eq!(spend_txn[2].input.len(), 2);
5565 check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5566 assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5567 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5571 fn test_key_derivation_params() {
5572 // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5573 // manager rotation to test that `channel_keys_id` returned in
5574 // [`SpendableOutputDescriptor::DelayedPaymentOutput`] let us re-derive the channel key set to
5575 // then derive a `delayed_payment_key`.
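// Concretely, after the channel is closed on-chain we construct a brand new TestKeysInterface from
// the same seed and use it to sweep the outputs, which only works if the channel_keys_id-based
// re-derivation is correct.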
5577 let chanmon_cfgs = create_chanmon_cfgs(3);
5579 // We manually create the node configuration to back up the seed.
5580 let seed = [42; 32];
5581 let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5582 let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5583 let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5584 let scorer = RwLock::new(test_utils::TestScorer::new());
5585 let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
5586 let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
5587 let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5588 let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5589 node_cfgs.remove(0);
5590 node_cfgs.insert(0, node);
5592 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5593 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5595 // Create some initial channels
5596 // Create a dummy channel to advance index by one and thus test re-derivation correctness
5598 let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5599 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5600 assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5602 // Ensure all nodes are at the same height
5603 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5604 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5605 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5606 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5608 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5609 let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5610 let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5611 assert_eq!(local_txn_1[0].input.len(), 1);
5612 check_spends!(local_txn_1[0], chan_1.3);
5614 // We check that the funding pubkeys are unique
5615 let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5616 let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5617 if from_0_funding_key_0 == from_1_funding_key_0
5618 || from_0_funding_key_0 == from_1_funding_key_1
5619 || from_0_funding_key_1 == from_1_funding_key_0
5620 || from_0_funding_key_1 == from_1_funding_key_1 {
5621 panic!("Funding pubkeys aren't unique");
5624 // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5625 mine_transaction(&nodes[0], &local_txn_1[0]);
5626 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5627 check_closed_broadcast!(nodes[0], true);
5628 check_added_monitors!(nodes[0], 1);
5629 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5631 let htlc_timeout = {
5632 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5633 assert_eq!(node_txn.len(), 1);
5634 assert_eq!(node_txn[0].input.len(), 1);
5635 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5636 check_spends!(node_txn[0], local_txn_1[0]);
5640 mine_transaction(&nodes[0], &htlc_timeout);
5641 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5642 expect_payment_failed!(nodes[0], our_payment_hash, false);
5644 // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
5645 let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5646 let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5647 assert_eq!(spend_txn.len(), 3);
5648 check_spends!(spend_txn[0], local_txn_1[0]);
5649 assert_eq!(spend_txn[1].input.len(), 1);
5650 check_spends!(spend_txn[1], htlc_timeout);
5651 assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5652 assert_eq!(spend_txn[2].input.len(), 2);
5653 check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5654 assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5655 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5659 fn test_static_output_closing_tx() {
5660 let chanmon_cfgs = create_chanmon_cfgs(2);
5661 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5662 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5663 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5665 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5667 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5668 let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5670 mine_transaction(&nodes[0], &closing_tx);
5671 check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
5672 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
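// Spendable-output events are only surfaced once the closing transaction is ANTI_REORG_DELAY
// confirmations deep, hence the extra blocks connected above.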
5674 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5675 assert_eq!(spend_txn.len(), 1);
5676 check_spends!(spend_txn[0], closing_tx);
5678 mine_transaction(&nodes[1], &closing_tx);
5679 check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
5680 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5682 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5683 assert_eq!(spend_txn.len(), 1);
5684 check_spends!(spend_txn[0], closing_tx);
5687 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5688 let chanmon_cfgs = create_chanmon_cfgs(2);
5689 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5690 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5691 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5692 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5694 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5696 // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5697 // present in B's local commitment transaction, but none of A's commitment transactions.
5698 nodes[1].node.claim_funds(payment_preimage);
5699 check_added_monitors!(nodes[1], 1);
5700 expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5702 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5703 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5704 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
5706 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5707 check_added_monitors!(nodes[0], 1);
5708 let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5709 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5710 check_added_monitors!(nodes[1], 1);
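// The exchange deliberately stops here: A's commitment_signed is never delivered to B, leaving
// the HTLC (as described above) present only in B's local commitment transaction.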
5712 let starting_block = nodes[1].best_block_info();
5713 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5714 for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5715 connect_block(&nodes[1], &block);
5716 block.header.prev_blockhash = block.block_hash();
5718 test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5719 check_closed_broadcast!(nodes[1], true);
5720 check_added_monitors!(nodes[1], 1);
5721 check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
5724 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5725 let chanmon_cfgs = create_chanmon_cfgs(2);
5726 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5727 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5728 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5729 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5731 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5732 nodes[0].node.send_payment_with_route(&route, payment_hash,
5733 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5734 check_added_monitors!(nodes[0], 1);
5736 let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5738 // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5739 // transaction; however, it is not in A's latest local commitment, so we can just broadcast that
5740 // to "time out" the HTLC.
5742 let starting_block = nodes[1].best_block_info();
5743 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5745 for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5746 connect_block(&nodes[0], &block);
5747 block.header.prev_blockhash = block.block_hash();
5749 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5750 check_closed_broadcast!(nodes[0], true);
5751 check_added_monitors!(nodes[0], 1);
5752 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5755 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5756 let chanmon_cfgs = create_chanmon_cfgs(3);
5757 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5758 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5759 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5760 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5762 // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5763 // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
5764 // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5765 // actually revoked.
5766 let htlc_value = if use_dust { 50000 } else { 3000000 };
5767 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5768 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5769 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5770 check_added_monitors!(nodes[1], 1);
5772 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5773 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5774 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5775 check_added_monitors!(nodes[0], 1);
5776 let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5777 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5778 check_added_monitors!(nodes[1], 1);
5779 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5780 check_added_monitors!(nodes[1], 1);
5781 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5783 if check_revoke_no_close {
5784 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5785 check_added_monitors!(nodes[0], 1);
5788 let starting_block = nodes[1].best_block_info();
5789 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5790 for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5791 connect_block(&nodes[0], &block);
5792 block.header.prev_blockhash = block.block_hash();
5794 if !check_revoke_no_close {
5795 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5796 check_closed_broadcast!(nodes[0], true);
5797 check_added_monitors!(nodes[0], 1);
5798 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5800 expect_payment_failed!(nodes[0], our_payment_hash, true);
5804 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5805 // There are only a few cases to test here:
5806 // * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5807 // broadcastable commitment transactions result in channel closure,
5808 // * it's included in an unrevoked-but-previous remote commitment transaction,
5809 // * it's included in the latest remote or local commitment transactions.
5810 // We test each of the three possible commitment transactions individually and use both dust and non-dust HTLCs.
5812 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5813 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5814 // tested for at least one of the cases in other tests.
5816 fn htlc_claim_single_commitment_only_a() {
5817 do_htlc_claim_local_commitment_only(true);
5818 do_htlc_claim_local_commitment_only(false);
5820 do_htlc_claim_current_remote_commitment_only(true);
5821 do_htlc_claim_current_remote_commitment_only(false);
5825 fn htlc_claim_single_commitment_only_b() {
5826 do_htlc_claim_previous_remote_commitment_only(true, false);
5827 do_htlc_claim_previous_remote_commitment_only(false, false);
5828 do_htlc_claim_previous_remote_commitment_only(true, true);
5829 do_htlc_claim_previous_remote_commitment_only(false, true);
5834 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
5835 let chanmon_cfgs = create_chanmon_cfgs(2);
5836 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5837 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5838 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5839 // Force duplicate randomness for every get-random call
5840 for node in nodes.iter() {
5841 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5844 // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5845 let channel_value_satoshis=10000;
5846 let push_msat=10001;
5847 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5848 let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5849 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5850 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5852 // Create a second channel with the same random values. This used to panic due to a colliding
5853 // channel_id, but now panics due to a colliding outbound SCID alias.
5854 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5858 fn bolt2_open_channel_sending_node_checks_part2() {
5859 let chanmon_cfgs = create_chanmon_cfgs(2);
5860 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5861 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5862 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5864 // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
5865 let channel_value_satoshis=2^24;
5866 let push_msat=10001;
5867 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5869 // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5870 let channel_value_satoshis=10000;
5871 // Test when push_msat exceeds 1000 * funding_satoshis.
5872 let push_msat=1000*channel_value_satoshis+1;
5873 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5875 // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5876 let channel_value_satoshis=10000;
5877 let push_msat=10001;
5878 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
5879 let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5880 assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
5882 // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5883 // Only the least-significant bit of channel_flags is currently defined, resulting in channel_flags only having one of two possible states: 0 or 1
5884 assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
5886 // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5887 assert!(BREAKDOWN_TIMEOUT>0);
5888 assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
5890 // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5891 let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
5892 assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
5894 // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5895 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
5896 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
5897 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
5898 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
5899 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
5903 fn bolt2_open_channel_sane_dust_limit() {
5904 let chanmon_cfgs = create_chanmon_cfgs(2);
5905 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5906 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5907 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5909 let channel_value_satoshis=1000000;
5910 let push_msat=10001;
5911 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5912 let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5913 node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
5914 node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5916 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5917 let events = nodes[1].node.get_and_clear_pending_msg_events();
5918 let err_msg = match events[0] {
5919 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5922 _ => panic!("Unexpected event"),
5924 assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5927 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5928 // originated from our node, its failure is surfaced to the user. We trigger this failure to
5929 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
5930 // is no longer affordable once it's freed.
5932 fn test_fail_holding_cell_htlc_upon_free() {
5933 let chanmon_cfgs = create_chanmon_cfgs(2);
5934 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5935 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5936 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5937 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5939 // First nodes[0] generates an update_fee, setting the channel's
5940 // pending_update_fee.
5942 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5943 *feerate_lock += 20;
5945 nodes[0].node.timer_tick_occurred();
5946 check_added_monitors!(nodes[0], 1);
5948 let events = nodes[0].node.get_and_clear_pending_msg_events();
5949 assert_eq!(events.len(), 1);
5950 let (update_msg, commitment_signed) = match events[0] {
5951 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5952 (update_fee.as_ref(), commitment_signed)
5954 _ => panic!("Unexpected event"),
5957 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5959 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5960 let channel_reserve = chan_stat.channel_reserve_msat;
5961 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5962 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5964 // The 2* and +1 HTLCs in the commit tx fee calculation are for the fee spike reserve.
5965 let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
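// With a 100_000 sat channel and 95_000_000 msat pushed to B, A's local balance is 5_000_000 msat;
// max_can_send is what remains after the reserve and the (fee-spike-padded) commitment fee.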
5966 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5968 // Send a payment which passes reserve checks but gets stuck in the holding cell.
5969 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5970 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5971 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5972 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5974 // Flush the pending fee update.
5975 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5976 let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5977 check_added_monitors!(nodes[1], 1);
5978 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5979 check_added_monitors!(nodes[0], 1);
5981 // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5982 // HTLC, but now that the fee has been raised the payment will fail, causing
5983 // us to surface its failure to the user.
5984 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5985 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5986 nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
5988 // Check that the payment failed to be sent out.
5989 let events = nodes[0].node.get_and_clear_pending_events();
5990 assert_eq!(events.len(), 2);
5992 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5993 assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5994 assert_eq!(our_payment_hash.clone(), *payment_hash);
5995 assert_eq!(*payment_failed_permanently, false);
5996 assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
5998 _ => panic!("Unexpected event"),
6001 &Event::PaymentFailed { ref payment_hash, .. } => {
6002 assert_eq!(our_payment_hash.clone(), *payment_hash);
6004 _ => panic!("Unexpected event"),
6008 // Test that if multiple HTLCs are released from the holding cell and one is
6009 // valid but the other is no longer valid upon release, the valid HTLC can be
6010 // successfully completed while the other one fails as expected.
6012 fn test_free_and_fail_holding_cell_htlcs() {
6013 let chanmon_cfgs = create_chanmon_cfgs(2);
6014 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6015 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6016 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6017 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6019 // First nodes[0] generates an update_fee, setting the channel's
6020 // pending_update_fee.
6022 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
6023 *feerate_lock += 200;
6025 nodes[0].node.timer_tick_occurred();
6026 check_added_monitors!(nodes[0], 1);
6028 let events = nodes[0].node.get_and_clear_pending_msg_events();
6029 assert_eq!(events.len(), 1);
6030 let (update_msg, commitment_signed) = match events[0] {
6031 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6032 (update_fee.as_ref(), commitment_signed)
6034 _ => panic!("Unexpected event"),
6037 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6039 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6040 let channel_reserve = chan_stat.channel_reserve_msat;
6041 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6042 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6044 // The 2* and +1 HTLCs in the commit tx fee calculation are for the fee spike reserve.
6046 let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
6047 let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
6048 let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
6050 // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6051 nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
6052 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
6053 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6054 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6055 let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
6056 nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
6057 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
6058 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6059 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6061 // Flush the pending fee update.
6062 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6063 let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6064 check_added_monitors!(nodes[1], 1);
6065 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6066 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6067 check_added_monitors!(nodes[0], 2);
6069 // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6070 // but now that the fee has been raised the second payment will fail, causing us
6071 // to surface its failure to the user. The first payment should succeed.
6072 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6073 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6074 nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
6076 // Check that the second payment failed to be sent out.
6077 let events = nodes[0].node.get_and_clear_pending_events();
6078 assert_eq!(events.len(), 2);
6080 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
6081 assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
6082 assert_eq!(payment_hash_2.clone(), *payment_hash);
6083 assert_eq!(*payment_failed_permanently, false);
6084 assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
6086 _ => panic!("Unexpected event"),
6089 &Event::PaymentFailed { ref payment_hash, .. } => {
6090 assert_eq!(payment_hash_2.clone(), *payment_hash);
6092 _ => panic!("Unexpected event"),
6095 // Complete the first payment and the RAA from the fee update.
6096 let (payment_event, send_raa_event) = {
6097 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6098 assert_eq!(msgs.len(), 2);
6099 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6101 let raa = match send_raa_event {
6102 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6103 _ => panic!("Unexpected event"),
6105 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6106 check_added_monitors!(nodes[1], 1);
6107 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6108 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6109 let events = nodes[1].node.get_and_clear_pending_events();
6110 assert_eq!(events.len(), 1);
6112 Event::PendingHTLCsForwardable { .. } => {},
6113 _ => panic!("Unexpected event"),
6115 nodes[1].node.process_pending_htlc_forwards();
6116 let events = nodes[1].node.get_and_clear_pending_events();
6117 assert_eq!(events.len(), 1);
6119 Event::PaymentClaimable { .. } => {},
6120 _ => panic!("Unexpected event"),
6122 nodes[1].node.claim_funds(payment_preimage_1);
6123 check_added_monitors!(nodes[1], 1);
6124 expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
6126 let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6127 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6128 commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6129 expect_payment_sent!(nodes[0], payment_preimage_1);
6132 // Test that if we fail to forward an HTLC that is being freed from the holding cell, the HTLC is
6133 // failed backwards. We trigger this failure to forward the freed HTLC by increasing our fee while
6134 // the HTLC is in the holding cell such that the HTLC is no longer affordable once it's freed.
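// Here the fee increase happens on the nodes[1] <-> nodes[2] channel, so it is the forwarding node
// (nodes[1]) that can no longer afford the freed HTLC and must fail it back to nodes[0].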
6137 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6138 let chanmon_cfgs = create_chanmon_cfgs(3);
6139 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6140 // Avoid having to include routing fees in calculations
6141 let mut config = test_default_channel_config();
6142 config.channel_config.forwarding_fee_base_msat = 0;
6143 config.channel_config.forwarding_fee_proportional_millionths = 0;
6144 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6145 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6146 let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6147 let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6149 // First nodes[1] generates an update_fee, setting the channel's
6150 // pending_update_fee.
6152 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6153 *feerate_lock += 20;
6155 nodes[1].node.timer_tick_occurred();
6156 check_added_monitors!(nodes[1], 1);
6158 let events = nodes[1].node.get_and_clear_pending_msg_events();
6159 assert_eq!(events.len(), 1);
6160 let (update_msg, commitment_signed) = match events[0] {
6161 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6162 (update_fee.as_ref(), commitment_signed)
6164 _ => panic!("Unexpected event"),
6167 nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6169 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6170 let channel_reserve = chan_stat.channel_reserve_msat;
6171 let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6172 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6174 // Send a payment which passes reserve checks but gets stuck in the holding cell.
6175 let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6176 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6177 let payment_event = {
6178 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6179 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6180 check_added_monitors!(nodes[0], 1);
6182 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6183 assert_eq!(events.len(), 1);
6185 SendEvent::from_event(events.remove(0))
6187 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6188 check_added_monitors!(nodes[1], 0);
6189 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6190 expect_pending_htlcs_forwardable!(nodes[1]);
6192 chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6193 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6195 // Flush the pending fee update.
6196 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6197 let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6198 check_added_monitors!(nodes[2], 1);
6199 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6200 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6201 check_added_monitors!(nodes[1], 2);
6203 // A final RAA message is generated to finalize the fee update.
6204 let events = nodes[1].node.get_and_clear_pending_msg_events();
6205 assert_eq!(events.len(), 1);
6207 let raa_msg = match &events[0] {
6208 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6211 _ => panic!("Unexpected event"),
6214 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6215 check_added_monitors!(nodes[2], 1);
6216 assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
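// nodes[2] has nothing further to send: the HTLC destined for it is still sitting in nodes[1]'s
// holding cell at this point.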
6218 // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6219 let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6220 assert_eq!(process_htlc_forwards_event.len(), 2);
6221 match &process_htlc_forwards_event[1] {
6222 &Event::PendingHTLCsForwardable { .. } => {},
6223 _ => panic!("Unexpected event"),
6226 // In response, we call ChannelManager's process_pending_htlc_forwards
6227 nodes[1].node.process_pending_htlc_forwards();
6228 check_added_monitors!(nodes[1], 1);
6230 // This causes the HTLC to be failed backwards.
6231 let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6232 assert_eq!(fail_event.len(), 1);
6233 let (fail_msg, commitment_signed) = match &fail_event[0] {
6234 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6235 assert_eq!(updates.update_add_htlcs.len(), 0);
6236 assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6237 assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6238 assert_eq!(updates.update_fail_htlcs.len(), 1);
6239 (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6241 _ => panic!("Unexpected event"),
6244 // Pass the failure messages back to nodes[0].
6245 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6246 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6248 // Complete the HTLC failure+removal process.
6249 let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6250 check_added_monitors!(nodes[0], 1);
6251 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6252 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6253 check_added_monitors!(nodes[1], 2);
6254 let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6255 assert_eq!(final_raa_event.len(), 1);
6256 let raa = match &final_raa_event[0] {
6257 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6258 _ => panic!("Unexpected event"),
6260 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6261 expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6262 check_added_monitors!(nodes[0], 1);
6266 fn test_payment_route_reaching_same_channel_twice() {
6267 // A route should not go through the same channel twice.
6268 // This is enforced when constructing a route.
6269 let chanmon_cfgs = create_chanmon_cfgs(2);
6270 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6271 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6272 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6273 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6275 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6276 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6277 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6279 // Extend the path by itself, essentially simulating route going through same channel twice
6280 let cloned_hops = route.paths[0].hops.clone();
6281 route.paths[0].hops.extend_from_slice(&cloned_hops);
6283 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6284 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6285 ), false, APIError::InvalidRoute { ref err },
6286 assert_eq!(err, &"Path went through the same channel twice"));
6289 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6290 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6291 // TODO: I don't believe this is explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux, we leave this as a TODO.
6294 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6295 //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6296 let chanmon_cfgs = create_chanmon_cfgs(2);
6297 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6298 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6299 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6300 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6302 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6303 route.paths[0].hops[0].fee_msat = 100;
6305 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6306 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6307 ), true, APIError::ChannelUnavailable { .. }, {});
6308 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6312 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6313 //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6314 let chanmon_cfgs = create_chanmon_cfgs(2);
6315 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6316 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6317 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6318 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6320 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6321 route.paths[0].hops[0].fee_msat = 0;
6322 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6323 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6324 true, APIError::ChannelUnavailable { ref err },
6325 assert_eq!(err, "Cannot send 0-msat HTLC"));
6327 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6328 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6332 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6333 //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6334 let chanmon_cfgs = create_chanmon_cfgs(2);
6335 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6336 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6337 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6338 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6340 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6341 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6342 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6343 check_added_monitors!(nodes[0], 1);
6344 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6345 updates.update_add_htlcs[0].amount_msat = 0;
6347 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6348 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
6349 check_closed_broadcast!(nodes[1], true).unwrap();
6350 check_added_monitors!(nodes[1], 1);
6351 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6352 [nodes[0].node.get_our_node_id()], 100000);
6356 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6357 //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6358 // This is enforced when constructing a route.
6359 let chanmon_cfgs = create_chanmon_cfgs(2);
6360 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6361 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6362 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6363 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6365 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6366 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6367 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6368 route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6369 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6370 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6371 ), true, APIError::InvalidRoute { ref err },
6372 assert_eq!(err, &"Channel CLTV overflowed?"));
6376 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6377 //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6378 //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6379 //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6380 let chanmon_cfgs = create_chanmon_cfgs(2);
6381 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6382 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6383 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6384 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6385 let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6386 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
6388 // Fetch a route in advance, as we will be unable to do so once we can no longer send.
6389 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6390 for i in 0..max_accepted_htlcs {
6391 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6392 let payment_event = {
6393 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6394 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6395 check_added_monitors!(nodes[0], 1);
6397 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6398 assert_eq!(events.len(), 1);
6399 if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6400 assert_eq!(htlcs[0].htlc_id, i);
6404 SendEvent::from_event(events.remove(0))
6406 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6407 check_added_monitors!(nodes[1], 0);
6408 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6410 expect_pending_htlcs_forwardable!(nodes[1]);
6411 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6413 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6414 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6415 ), true, APIError::ChannelUnavailable { .. }, {});
6417 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6421 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6422 //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6423 let chanmon_cfgs = create_chanmon_cfgs(2);
6424 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6425 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6426 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6427 let channel_value = 100000;
6428 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6429 let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6431 send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6433 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6434 // Manually create a route over our max in flight (which our router normally automatically limits us to).
6436 route.paths[0].hops[0].fee_msat = max_in_flight + 1;
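// One msat over the counterparty's limit should be rejected locally (ChannelUnavailable) before
// any update_add_htlc message is generated.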
6437 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6438 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6439 ), true, APIError::ChannelUnavailable { .. }, {});
6440 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6442 send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6445 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6447 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6448 //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6449 let chanmon_cfgs = create_chanmon_cfgs(2);
6450 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6451 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6452 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6453 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6454 let htlc_minimum_msat: u64;
6456 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6457 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6458 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6459 htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
6462 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6463 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6464 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6465 check_added_monitors!(nodes[0], 1);
6466 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6467 updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6468 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6469 assert!(nodes[1].node.list_channels().is_empty());
6470 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6471 assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6472 check_added_monitors!(nodes[1], 1);
6473 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6477 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6478 //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6479 let chanmon_cfgs = create_chanmon_cfgs(2);
6480 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6481 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6482 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6483 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6485 let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6486 let channel_reserve = chan_stat.channel_reserve_msat;
6487 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6488 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6489 // The 2* and +1 are for the fee spike reserve.
6490 let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6492 let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6493 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6494 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6495 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6496 check_added_monitors!(nodes[0], 1);
6497 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6499 // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6500 // at this time channel-initiatee receivers are not required to enforce that senders
6501 // respect the fee_spike_reserve.
6502 updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6503 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6505 assert!(nodes[1].node.list_channels().is_empty());
6506 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6507 assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6508 check_added_monitors!(nodes[1], 1);
6509 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6513 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6514 //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6515 //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6516 let chanmon_cfgs = create_chanmon_cfgs(2);
6517 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6518 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6519 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6520 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6522 let send_amt = 3999999;
6523 let (mut route, our_payment_hash, _, our_payment_secret) =
6524 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6525 route.paths[0].hops[0].fee_msat = send_amt;
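// We build the onion and the UpdateAddHTLC message by hand (rather than via send_payment_with_route)
// so we can replay the same HTLC with arbitrary htlc_ids and push the receiver past its
// max_accepted_htlcs limit without the sender's own checks getting in the way.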
6526 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6527 let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
6528 let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6529 let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
6530 let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6531 &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None).unwrap();
6532 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
6534 let mut msg = msgs::UpdateAddHTLC {
6538 payment_hash: our_payment_hash,
6539 cltv_expiry: htlc_cltv,
6540 onion_routing_packet: onion_packet.clone(),
6541 skimmed_fee_msat: None,
6542 blinding_point: None,
6546 msg.htlc_id = i as u64;
6547 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6549 msg.htlc_id = 50;
6550 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6552 assert!(nodes[1].node.list_channels().is_empty());
6553 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6554 assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6555 check_added_monitors!(nodes[1], 1);
6556 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6560 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6561 //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6562 let chanmon_cfgs = create_chanmon_cfgs(2);
6563 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6564 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6565 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6566 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6568 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6569 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6570 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6571 check_added_monitors!(nodes[0], 1);
6572 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
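// Bump the amount past the receiver's advertised max_htlc_value_in_flight_msat by a single msat;
// per the BOLT 2 requirement above, the receiver must fail the channel on receipt.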
6573 updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6574 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6576 assert!(nodes[1].node.list_channels().is_empty());
6577 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6578 assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6579 check_added_monitors!(nodes[1], 1);
6580 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
6584 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6585 //BOLT2 Requirement: if the sending node sets cltv_expiry to greater than or equal to 500000000: SHOULD fail the channel.
6586 let chanmon_cfgs = create_chanmon_cfgs(2);
6587 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6588 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6589 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6591 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6592 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6593 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6594 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6595 check_added_monitors!(nodes[0], 1);
6596 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
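// 500_000_000 is the threshold at which a locktime-style value is interpreted as a UNIX timestamp
// rather than a block height, hence the "CLTV expiry in seconds" error asserted below.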
6597 updates.update_add_htlcs[0].cltv_expiry = 500000000;
6598 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6600 assert!(nodes[1].node.list_channels().is_empty());
6601 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6602 assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
6603 check_added_monitors!(nodes[1], 1);
6604 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6608 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6609 //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6610 // We test this by first checking that repeated HTLCs pass commitment signature checks
6611 // after a disconnect, and then that non-sequential htlc_ids result in a channel failure.
6612 let chanmon_cfgs = create_chanmon_cfgs(2);
6613 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6614 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6615 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6617 create_announced_chan_between_nodes(&nodes, 0, 1);
6618 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6619 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6620 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6621 check_added_monitors!(nodes[0], 1);
6622 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6623 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6625 //Disconnect and Reconnect
6626 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6627 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6628 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6629 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6631 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6632 assert_eq!(reestablish_1.len(), 1);
6633 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6634 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6636 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6637 assert_eq!(reestablish_2.len(), 1);
6638 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6639 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6640 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6641 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
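// After reconnection, redelivering the same update_add_htlc with the same htlc_id models the
// sender's retransmission. Because the HTLC was never irrevocably committed, this replay must be
// accepted and pass the commitment signature checks; only the later, out-of-sequence replay should
// fail the channel.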
6644 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6645 assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6646 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6647 check_added_monitors!(nodes[1], 1);
6648 let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6650 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6652 assert!(nodes[1].node.list_channels().is_empty());
6653 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6654 assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6655 check_added_monitors!(nodes[1], 1);
6656 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6660 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6661 //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6663 let chanmon_cfgs = create_chanmon_cfgs(2);
6664 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6665 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6666 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6667 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6668 let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6669 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6670 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6672 check_added_monitors!(nodes[0], 1);
6673 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6674 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
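// At this point the HTLC has only been offered: no commitment_signed / revoke_and_ack exchange has
// completed, so a fulfill coming back from nodes[1] is premature and must fail the channel.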
6676 let update_msg = msgs::UpdateFulfillHTLC{
6679 payment_preimage: our_payment_preimage,
6682 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6684 assert!(nodes[0].node.list_channels().is_empty());
6685 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6686 assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6687 check_added_monitors!(nodes[0], 1);
6688 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6692 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6693 //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6695 let chanmon_cfgs = create_chanmon_cfgs(2);
6696 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6697 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6698 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6699 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6701 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6702 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6703 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6704 check_added_monitors!(nodes[0], 1);
6705 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6706 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6708 let update_msg = msgs::UpdateFailHTLC{
6711 reason: msgs::OnionErrorPacket { data: Vec::new()},
6714 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6716 assert!(nodes[0].node.list_channels().is_empty());
6717 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6718 assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6719 check_added_monitors!(nodes[0], 1);
6720 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6724 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6725 //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6727 let chanmon_cfgs = create_chanmon_cfgs(2);
6728 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6729 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6730 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6731 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6733 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6734 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6735 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6736 check_added_monitors!(nodes[0], 1);
6737 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6738 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6739 let update_msg = msgs::UpdateFailMalformedHTLC{
6742 sha256_of_onion: [1; 32],
6743 failure_code: 0x8000,
6746 nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6748 assert!(nodes[0].node.list_channels().is_empty());
6749 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6750 assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6751 check_added_monitors!(nodes[0], 1);
6752 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6756 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6757 //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6759 let chanmon_cfgs = create_chanmon_cfgs(2);
6760 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6761 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6762 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6763 create_announced_chan_between_nodes(&nodes, 0, 1);
6765 let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6767 nodes[1].node.claim_funds(our_payment_preimage);
6768 check_added_monitors!(nodes[1], 1);
6769 expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6771 let events = nodes[1].node.get_and_clear_pending_msg_events();
6772 assert_eq!(events.len(), 1);
6773 let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6775 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6776 assert!(update_add_htlcs.is_empty());
6777 assert_eq!(update_fulfill_htlcs.len(), 1);
6778 assert!(update_fail_htlcs.is_empty());
6779 assert!(update_fail_malformed_htlcs.is_empty());
6780 assert!(update_fee.is_none());
6781 update_fulfill_htlcs[0].clone()
6783 _ => panic!("Unexpected event"),
6787 update_fulfill_msg.htlc_id = 1;
6789 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6791 assert!(nodes[0].node.list_channels().is_empty());
6792 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6793 assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6794 check_added_monitors!(nodes[0], 1);
6795 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6799 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6800 //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6802 let chanmon_cfgs = create_chanmon_cfgs(2);
6803 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6804 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6805 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6806 create_announced_chan_between_nodes(&nodes, 0, 1);
6808 let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6810 nodes[1].node.claim_funds(our_payment_preimage);
6811 check_added_monitors!(nodes[1], 1);
6812 expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6814 let events = nodes[1].node.get_and_clear_pending_msg_events();
6815 assert_eq!(events.len(), 1);
6816 let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6818 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6819 assert!(update_add_htlcs.is_empty());
6820 assert_eq!(update_fulfill_htlcs.len(), 1);
6821 assert!(update_fail_htlcs.is_empty());
6822 assert!(update_fail_malformed_htlcs.is_empty());
6823 assert!(update_fee.is_none());
6824 update_fulfill_htlcs[0].clone()
6826 _ => panic!("Unexpected event"),
6830 update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
6832 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6834 assert!(nodes[0].node.list_channels().is_empty());
6835 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6836 assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6837 check_added_monitors!(nodes[0], 1);
6838 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6842 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6843 //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6845 let chanmon_cfgs = create_chanmon_cfgs(2);
6846 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6847 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6848 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6849 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6851 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6852 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6853 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6854 check_added_monitors!(nodes[0], 1);
6856 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6857 updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6859 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6860 check_added_monitors!(nodes[1], 0);
6861 commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6863 let events = nodes[1].node.get_and_clear_pending_msg_events();
6865 let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6867 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6868 assert!(update_add_htlcs.is_empty());
6869 assert!(update_fulfill_htlcs.is_empty());
6870 assert!(update_fail_htlcs.is_empty());
6871 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6872 assert!(update_fee.is_none());
6873 update_fail_malformed_htlcs[0].clone()
6875 _ => panic!("Unexpected event"),
6878 update_msg.failure_code &= !0x8000;
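// 0x8000 is the BADONION bit; an update_fail_malformed_htlc without it is invalid per BOLT 2, so
// nodes[0] must fail the channel when it receives one.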
6879 nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6881 assert!(nodes[0].node.list_channels().is_empty());
6882 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6883 assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6884 check_added_monitors!(nodes[0], 1);
6885 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
6889 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6890 //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6891 // * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6893 let chanmon_cfgs = create_chanmon_cfgs(3);
6894 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6895 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6896 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6897 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6898 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6900 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6903 let mut payment_event = {
6904 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6905 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6906 check_added_monitors!(nodes[0], 1);
6907 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6908 assert_eq!(events.len(), 1);
6909 SendEvent::from_event(events.remove(0))
6911 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6912 check_added_monitors!(nodes[1], 0);
6913 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6914 expect_pending_htlcs_forwardable!(nodes[1]);
6915 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6916 assert_eq!(events_2.len(), 1);
6917 check_added_monitors!(nodes[1], 1);
6918 payment_event = SendEvent::from_event(events_2.remove(0));
6919 assert_eq!(payment_event.msgs.len(), 1);
6922 payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6923 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6924 check_added_monitors!(nodes[2], 0);
6925 commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6927 let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6928 assert_eq!(events_3.len(), 1);
6929 let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6931 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6932 assert!(update_add_htlcs.is_empty());
6933 assert!(update_fulfill_htlcs.is_empty());
6934 assert!(update_fail_htlcs.is_empty());
6935 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6936 assert!(update_fee.is_none());
6937 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6939 _ => panic!("Unexpected event"),
6943 nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6945 check_added_monitors!(nodes[1], 0);
6946 commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6947 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6948 let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6949 assert_eq!(events_4.len(), 1);
6951 //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
6953 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6954 assert!(update_add_htlcs.is_empty());
6955 assert!(update_fulfill_htlcs.is_empty());
6956 assert_eq!(update_fail_htlcs.len(), 1);
6957 assert!(update_fail_malformed_htlcs.is_empty());
6958 assert!(update_fee.is_none());
6960 _ => panic!("Unexpected event"),
6963 check_added_monitors!(nodes[1], 1);
6967 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6968 let chanmon_cfgs = create_chanmon_cfgs(3);
6969 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6970 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6971 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6972 create_announced_chan_between_nodes(&nodes, 0, 1);
6973 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6975 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6978 let mut payment_event = {
6979 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6980 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6981 check_added_monitors!(nodes[0], 1);
6982 SendEvent::from_node(&nodes[0])
6985 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6986 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6987 expect_pending_htlcs_forwardable!(nodes[1]);
6988 check_added_monitors!(nodes[1], 1);
6989 payment_event = SendEvent::from_node(&nodes[1]);
6990 assert_eq!(payment_event.msgs.len(), 1);
6993 payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
6994 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6995 check_added_monitors!(nodes[2], 0);
6996 commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6998 let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6999 assert_eq!(events_3.len(), 1);
7001 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
7002 let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
7003 // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
7004 update_msg.failure_code |= 0x2000;
7006 nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
7007 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
7009 _ => panic!("Unexpected event"),
7012 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
7013 vec![HTLCDestination::NextHopChannel {
7014 node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
7015 let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
7016 assert_eq!(events_4.len(), 1);
7017 check_added_monitors!(nodes[1], 1);
7020 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
7021 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
7022 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
7024 _ => panic!("Unexpected event"),
7027 let events_5 = nodes[0].node.get_and_clear_pending_events();
7028 assert_eq!(events_5.len(), 2);
7030 // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
7031 // the node originating the error to its next hop.
7033 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
7035 assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
7036 assert!(is_permanent);
7037 assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
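// invalid_onion_version is BADONION|PERM|4 (0x8000|0x4000|4); the extra 0x2000 is the NODE bit we
// OR'd in at the intermediate hop above.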
7039 _ => panic!("Unexpected event"),
7042 Event::PaymentFailed { payment_hash, .. } => {
7043 assert_eq!(payment_hash, our_payment_hash);
7045 _ => panic!("Unexpected event"),
7048 // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
7051 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
7052 // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment) reaches ANTI_REORG_DELAY.
7053 // We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to get them all, as
7054 // an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA.
7056 let mut chanmon_cfgs = create_chanmon_cfgs(2);
7057 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7058 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7059 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7060 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7061 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7063 let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7064 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
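// Dust HTLCs have no corresponding output on the commitment transaction, so their failure can only
// be surfaced once the commitment itself confirms and ANTI_REORG_DELAY has passed.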
7066 // We route 2 dust-HTLCs between A and B
7067 let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7068 let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7069 route_payment(&nodes[0], &[&nodes[1]], 1000000);
7071 // Cache one local commitment tx as previous
7072 let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7074 // Fail one HTLC to prune it in the will-be-latest-local commitment tx
7075 nodes[1].node.fail_htlc_backwards(&payment_hash_2);
7076 check_added_monitors!(nodes[1], 0);
7077 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7078 check_added_monitors!(nodes[1], 1);
7080 let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7081 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
7082 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7083 check_added_monitors!(nodes[0], 1);
7085 // Cache one local commitment tx as latest
7086 let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7088 let events = nodes[0].node.get_and_clear_pending_msg_events();
7090 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7091 assert_eq!(node_id, nodes[1].node.get_our_node_id());
7093 _ => panic!("Unexpected event"),
7096 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7097 assert_eq!(node_id, nodes[1].node.get_our_node_id());
7099 _ => panic!("Unexpected event"),
7102 assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7103 // Fail the 2 dust-HTLCs, moving their failures into the maturation buffer (htlc_updated_waiting_threshold_conf)
7104 if announce_latest {
7105 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7107 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7110 check_closed_broadcast!(nodes[0], true);
7111 check_added_monitors!(nodes[0], 1);
7112 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7114 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7115 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7116 let events = nodes[0].node.get_and_clear_pending_events();
7117 // Only 2 PaymentPathFailed events should show up; the over-dust HTLC has to be failed by its timeout tx
7118 assert_eq!(events.len(), 4);
7119 let mut first_failed = false;
7120 for event in events {
7122 Event::PaymentPathFailed { payment_hash, .. } => {
7123 if payment_hash == payment_hash_1 {
7124 assert!(!first_failed);
7125 first_failed = true;
7127 assert_eq!(payment_hash, payment_hash_2);
7130 Event::PaymentFailed { .. } => {}
7131 _ => panic!("Unexpected event"),
7137 fn test_failure_delay_dust_htlc_local_commitment() {
7138 do_test_failure_delay_dust_htlc_local_commitment(true);
7139 do_test_failure_delay_dust_htlc_local_commitment(false);
7142 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7143 // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7144 // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7145 // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7146 // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7147 // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7148 // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
7150 let chanmon_cfgs = create_chanmon_cfgs(3);
7151 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7152 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7153 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7154 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7156 let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7157 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7159 let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7160 let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7162 let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7163 let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7165 // We revoked bs_commitment_tx
7167 let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7168 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7171 let mut timeout_tx = Vec::new();
7173 // We fail dust-HTLC 1 by broadcast of local commitment tx
7174 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7175 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7176 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7177 expect_payment_failed!(nodes[0], dust_hash, false);
7179 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7180 check_closed_broadcast!(nodes[0], true);
7181 check_added_monitors!(nodes[0], 1);
7182 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7183 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
7184 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7185 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7186 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7187 mine_transaction(&nodes[0], &timeout_tx[0]);
7188 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7189 expect_payment_failed!(nodes[0], non_dust_hash, false);
7191 // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
7192 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7193 check_closed_broadcast!(nodes[0], true);
7194 check_added_monitors!(nodes[0], 1);
7195 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7196 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7198 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7199 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
7200 .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
7201 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
7202 // For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the
7203 // dust HTLC should have been failed.
7204 expect_payment_failed!(nodes[0], dust_hash, false);
7207 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7209 assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
7211 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
7212 mine_transaction(&nodes[0], &timeout_tx[0]);
7213 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7214 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7215 expect_payment_failed!(nodes[0], non_dust_hash, false);
7220 fn test_sweep_outbound_htlc_failure_update() {
7221 do_test_sweep_outbound_htlc_failure_update(false, true);
7222 do_test_sweep_outbound_htlc_failure_update(false, false);
7223 do_test_sweep_outbound_htlc_failure_update(true, false);
7227 fn test_user_configurable_csv_delay() {
7228 // We test that our channel constructors yield errors when we pass them an absurd CSV delay
7230 let mut low_our_to_self_config = UserConfig::default();
7231 low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
7232 let mut high_their_to_self_config = UserConfig::default();
7233 high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
7234 let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7235 let chanmon_cfgs = create_chanmon_cfgs(2);
7236 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7237 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7238 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7240 // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
7241 if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7242 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
7243 &low_our_to_self_config, 0, 42, None)
7246 APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7247 _ => panic!("Unexpected event"),
7249 } else { assert!(false) }
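// An our_to_self_delay of 6 blocks is below LDK's BREAKDOWN_TIMEOUT floor, so the outbound
// constructor refuses it as putting user funds at risk; next we check the inbound constructor with
// the same low config.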
7251 // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7252 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7253 let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7254 open_channel.common_fields.to_self_delay = 200;
7255 if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7256 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7257 &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7260 ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7261 _ => panic!("Unexpected event"),
7263 } else { assert!(false); }
7265 // We test that msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7266 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7267 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7268 let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7269 accept_channel.common_fields.to_self_delay = 200;
7270 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7272 if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7274 &ErrorAction::SendErrorMessage { ref msg } => {
7275 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7276 reason_msg = msg.data.clone();
7280 } else { panic!(); }
7281 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
7283 // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7284 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7285 let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7286 open_channel.common_fields.to_self_delay = 200;
7287 if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7288 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7289 &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7292 ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7293 _ => panic!("Unexpected event"),
7295 } else { assert!(false); }
7299 fn test_check_htlc_underpaying() {
7300 // Send a payment through A -> B, but A maliciously
7301 // sends a probe payment (i.e. less than the expected value)
7302 // to B; B should refuse the payment.
7304 let chanmon_cfgs = create_chanmon_cfgs(2);
7305 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7306 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7307 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7309 // Create some initial channels
7310 create_announced_chan_between_nodes(&nodes, 0, 1);
7312 let scorer = test_utils::TestScorer::new();
7313 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7314 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
7315 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7316 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
7317 let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
7318 None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7319 let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7320 let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
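// The receiver registered this payment hash expecting 100_000 msat, but the route built above only
// delivers 10_000 msat, so nodes[1] should fail the HTLC back rather than claim it.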
7321 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7322 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7323 check_added_monitors!(nodes[0], 1);
7325 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7326 assert_eq!(events.len(), 1);
7327 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7328 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7329 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7331 // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7332 // and then will wait a second random delay before failing the HTLC back:
7333 expect_pending_htlcs_forwardable!(nodes[1]);
7334 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7336 // nodes[1] is expecting a payment of 100_000 but received 10_000,
7337 // so it should fail the HTLC as if it didn't know the preimage.
7338 nodes[1].node.process_pending_htlc_forwards();
7340 let events = nodes[1].node.get_and_clear_pending_msg_events();
7341 assert_eq!(events.len(), 1);
7342 let (update_fail_htlc, commitment_signed) = match events[0] {
7343 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7344 assert!(update_add_htlcs.is_empty());
7345 assert!(update_fulfill_htlcs.is_empty());
7346 assert_eq!(update_fail_htlcs.len(), 1);
7347 assert!(update_fail_malformed_htlcs.is_empty());
7348 assert!(update_fee.is_none());
7349 (update_fail_htlcs[0].clone(), commitment_signed)
7351 _ => panic!("Unexpected event"),
7353 check_added_monitors!(nodes[1], 1);
7355 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7356 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7358 // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7359 let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7360 expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
7361 expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7365 fn test_announce_disable_channels() {
7366 // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check for the generated
7367 // disabling ChannelUpdates. Reconnect B, reestablish, and check that re-enabling ChannelUpdates are generated.
7369 let chanmon_cfgs = create_chanmon_cfgs(2);
7370 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7371 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7372 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7374 // Connect a dummy node so that gossip broadcast events are properly generated
7375 connect_dummy_node(&nodes[0]);
7377 create_announced_chan_between_nodes(&nodes, 0, 1);
7378 create_announced_chan_between_nodes(&nodes, 1, 0);
7379 create_announced_chan_between_nodes(&nodes, 0, 1);
7382 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7383 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7385 for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7386 nodes[0].node.timer_tick_occurred();
7388 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7389 assert_eq!(msg_events.len(), 3);
7390 let mut chans_disabled = new_hash_map();
7391 for e in msg_events {
7393 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7394 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7395 // Check that each channel gets updated exactly once
7396 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7397 panic!("Generated ChannelUpdate for wrong chan!");
7400 _ => panic!("Unexpected event"),
7404 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7405 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7407 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7408 assert_eq!(reestablish_1.len(), 3);
7409 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7410 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7412 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7413 assert_eq!(reestablish_2.len(), 3);
7415 // Reestablish chan_1
7416 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7417 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7418 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7419 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7420 // Reestablish chan_2
7421 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7422 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7423 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7424 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7425 // Reestablish chan_3
7426 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7427 handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7428 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7429 handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7431 for _ in 0..ENABLE_GOSSIP_TICKS {
7432 nodes[0].node.timer_tick_occurred();
7434 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
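// Re-enabling is deliberately delayed: nothing is broadcast until ENABLE_GOSSIP_TICKS full ticks
// pass with the peer connected, and the next tick then emits one ChannelUpdate (with the disabled
// bit cleared) per channel.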
7435 nodes[0].node.timer_tick_occurred();
7436 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7437 assert_eq!(msg_events.len(), 3);
7438 for e in msg_events {
7440 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7441 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7442 match chans_disabled.remove(&msg.contents.short_channel_id) {
7443 // Each update should have a higher timestamp than the previous one, replacing
7445 Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7446 None => panic!("Generated ChannelUpdate for wrong chan!"),
7449 _ => panic!("Unexpected event"),
7452 // Check that each channel gets updated exactly once
7453 assert!(chans_disabled.is_empty());
7457 fn test_bump_penalty_txn_on_revoked_commitment() {
7458 // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7459 // we're able to claim outputs on the revoked commitment transaction before the timelocks expire.
7461 let chanmon_cfgs = create_chanmon_cfgs(2);
7462 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7463 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7464 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7466 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7468 let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7469 let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7470 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7471 let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7472 send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7474 let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7475 // Revoked commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7476 assert_eq!(revoked_txn[0].output.len(), 4);
7477 assert_eq!(revoked_txn[0].input.len(), 1);
7478 assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7479 let revoked_txid = revoked_txn[0].txid();
7481 let mut penalty_sum = 0;
7482 for outp in revoked_txn[0].output.iter() {
7483 if outp.script_pubkey.is_v0_p2wsh() {
7484 penalty_sum += outp.value;
7488 // Connect blocks to change the height_timer range, to check that we use the right soonest_timelock
7489 let header_114 = connect_blocks(&nodes[1], 14);
7491 // Actually revoke the commitment tx by claiming an HTLC
7492 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7493 connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7494 check_added_monitors!(nodes[1], 1);
7496 // One or more justice tx should have been broadcast, check it
7500 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7501 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7502 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7503 assert_eq!(node_txn[0].output.len(), 1);
7504 check_spends!(node_txn[0], revoked_txn[0]);
7505 let fee_1 = penalty_sum - node_txn[0].output[0].value;
7506 feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
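// Feerate is computed as fee (sats) * 1000 / weight, i.e. sats per kilo-weight-unit; the
// rebroadcasts below are expected to bump this by at least 25% each time (asserted as
// feerate_n * 100 >= feerate_{n-1} * 125).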
7507 penalty_1 = node_txn[0].txid();
7511 // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7512 connect_blocks(&nodes[1], 15);
7513 let mut penalty_2 = penalty_1;
7514 let mut feerate_2 = 0;
7516 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7517 assert_eq!(node_txn.len(), 1);
7518 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7519 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7520 assert_eq!(node_txn[0].output.len(), 1);
7521 check_spends!(node_txn[0], revoked_txn[0]);
7522 penalty_2 = node_txn[0].txid();
7523 // Verify the new bumped tx is different from the last claiming transaction, we don't want spurious rebroadcasts
7524 assert_ne!(penalty_2, penalty_1);
7525 let fee_2 = penalty_sum - node_txn[0].output[0].value;
7526 feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7527 // Verify 25% bump heuristic
7528 assert!(feerate_2 * 100 >= feerate_1 * 125);
7532 assert_ne!(feerate_2, 0);
7534 // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7535 connect_blocks(&nodes[1], 1);
7537 let mut feerate_3 = 0;
7539 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7540 assert_eq!(node_txn.len(), 1);
7541 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7542 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7543 assert_eq!(node_txn[0].output.len(), 1);
7544 check_spends!(node_txn[0], revoked_txn[0]);
7545 penalty_3 = node_txn[0].txid();
7546 // Verify the new bumped tx is different from the last claiming transaction, we don't want spurious rebroadcasts
7547 assert_ne!(penalty_3, penalty_2);
7548 let fee_3 = penalty_sum - node_txn[0].output[0].value;
7549 feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
7550 // Verify 25% bump heuristic
7551 assert!(feerate_3 * 100 >= feerate_2 * 125);
7555 assert_ne!(feerate_3, 0);
7557 nodes[1].node.get_and_clear_pending_events();
7558 nodes[1].node.get_and_clear_pending_msg_events();
7562 fn test_bump_penalty_txn_on_revoked_htlcs() {
7563 // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7564 // we're able to claim outputs on revoked HTLC transactions before the timelocks expire.
7566 let mut chanmon_cfgs = create_chanmon_cfgs(2);
7567 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7568 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7569 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7570 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7572 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7573 // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7574 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7575 let scorer = test_utils::TestScorer::new();
7576 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7577 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7578 let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
7579 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7580 let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7581 let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
7582 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7583 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7584 let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
7585 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7586 let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
7588 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7589 assert_eq!(revoked_local_txn[0].input.len(), 1);
7590 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7592 // Revoke local commitment tx
7593 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7595 // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7596 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7597 check_closed_broadcast!(nodes[1], true);
7598 check_added_monitors!(nodes[1], 1);
7599 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7600 connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7602 let revoked_htlc_txn = {
7603 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7604 assert_eq!(txn.len(), 2);
7606 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7607 assert_eq!(txn[0].input.len(), 1);
7608 check_spends!(txn[0], revoked_local_txn[0]);
7610 assert_eq!(txn[1].input.len(), 1);
7611 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7612 assert_eq!(txn[1].output.len(), 1);
7613 check_spends!(txn[1], revoked_local_txn[0]);
7618 // Broadcast set of revoked txn on A
7619 let hash_128 = connect_blocks(&nodes[0], 40);
7620 let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7621 connect_block(&nodes[0], &block_11);
7622 let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7623 connect_block(&nodes[0], &block_129);
7624 let events = nodes[0].node.get_and_clear_pending_events();
7625 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
7626 match events.last().unwrap() {
7627 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7628 _ => panic!("Unexpected event"),
7634 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7635 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
7636 // Verify the claim txn are spending the revoked HTLC txn
7638 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7639 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7640 // which are included in the same block (they are broadcast because we scan the
7641 // transactions linearly and generate claims as we go; they likely should be removed in the
7642 // future).
7643 assert_eq!(node_txn[0].input.len(), 1);
7644 check_spends!(node_txn[0], revoked_local_txn[0]);
7645 assert_eq!(node_txn[1].input.len(), 1);
7646 check_spends!(node_txn[1], revoked_local_txn[0]);
7647 assert_eq!(node_txn[2].input.len(), 1);
7648 check_spends!(node_txn[2], revoked_local_txn[0]);
7650 // Each of the three justice transactions claim a separate (single) output of the three
7651 // available, which we check here:
7652 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7653 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7654 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7656 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7657 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7659 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7660 // output, checked above).
7661 assert_eq!(node_txn[3].input.len(), 2);
7662 assert_eq!(node_txn[3].output.len(), 1);
7663 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7665 first = node_txn[3].txid();
7666 // Store both feerates for later comparison
7667 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7668 feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
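// Note: this is an aggregated claim, so the fee is the sum of both swept revoked
// HTLC outputs minus the single claim output, and the feerate is measured over
// node_txn[3]'s full weight.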
7669 penalty_txn = vec![node_txn[2].clone()];
7673 // Connect one more block to see if bumped penalty txn are issued for the HTLC txn
7674 let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7675 connect_block(&nodes[0], &block_130);
7676 let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7677 connect_block(&nodes[0], &block_131);
7679 // A few more blocks to confirm penalty txn
7680 connect_blocks(&nodes[0], 4);
7681 assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7682 let header_144 = connect_blocks(&nodes[0], 9);
7684 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7685 assert_eq!(node_txn.len(), 1);
7687 assert_eq!(node_txn[0].input.len(), 2);
7688 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7689 // Verify bumped tx is different and 25% bump heuristic
7690 assert_ne!(first, node_txn[0].txid());
7691 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7692 let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7693 assert!(feerate_2 * 100 > feerate_1 * 125);
7694 let txn = vec![node_txn[0].clone()];
7698 // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7699 connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7700 connect_blocks(&nodes[0], 20);
7702 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7703 // We verify that no new transaction has been broadcast because previously
7704 // we had a bug in this exact behavior: we didn't track remote HTLC outputs for monitoring (see #411),
7705 // which meant we wouldn't see them spent by a justice tx, and bumped justice txn
7706 // were generated forever instead of being safely cleaned up after confirmation plus ANTI_REORG_SAFE_DELAY blocks.
7707 // Spending the revoked HTLC outputs with a claiming transaction should remove the claim request
7708 // as expected and dry up bumped justice generation.
7709 assert_eq!(node_txn.len(), 0);
7712 check_closed_broadcast!(nodes[0], true);
7713 check_added_monitors!(nodes[0], 1);
7717 fn test_bump_penalty_txn_on_remote_commitment() {
7718 // If claim txn have feerates too low to get into mempools, RBF-bump them to be sure
7719 // we're able to claim outputs on the remote commitment transaction before the timelocks expire
7722 // Provide preimage for one
7723 // Check aggregation
7725 let chanmon_cfgs = create_chanmon_cfgs(2);
7726 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7727 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7728 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7730 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7731 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7732 route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7734 // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7735 let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7736 assert_eq!(remote_txn[0].output.len(), 4);
7737 assert_eq!(remote_txn[0].input.len(), 1);
7738 assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7740 // Claim a HTLC without revocation (provide B monitor with preimage)
7741 nodes[1].node.claim_funds(payment_preimage);
7742 expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7743 mine_transaction(&nodes[1], &remote_txn[0]);
7744 check_added_monitors!(nodes[1], 2);
7745 connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7747 // One or more claim txn should have been broadcast; check them
7751 let feerate_timeout;
7752 let feerate_preimage;
7754 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7755 // 3 transactions including:
7756 // preimage and timeout sweeps from remote commitment + preimage sweep bump
7757 assert_eq!(node_txn.len(), 3);
7758 assert_eq!(node_txn[0].input.len(), 1);
7759 assert_eq!(node_txn[1].input.len(), 1);
7760 assert_eq!(node_txn[2].input.len(), 1);
7761 check_spends!(node_txn[0], remote_txn[0]);
7762 check_spends!(node_txn[1], remote_txn[0]);
7763 check_spends!(node_txn[2], remote_txn[0]);
7765 preimage = node_txn[0].txid();
7766 let index = node_txn[0].input[0].previous_output.vout;
7767 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7768 feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();
7770 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7771 (node_txn[2].clone(), node_txn[1].clone())
7773 (node_txn[1].clone(), node_txn[2].clone())
7776 preimage_bump = preimage_bump_tx;
7777 check_spends!(preimage_bump, remote_txn[0]);
7778 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7780 timeout = timeout_tx.txid();
7781 let index = timeout_tx.input[0].previous_output.vout;
7782 let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
7783 feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();
7787 assert_ne!(feerate_timeout, 0);
7788 assert_ne!(feerate_preimage, 0);
7790 // After the height timer expires, new bumped claim txn should have been broadcast; check them
7791 connect_blocks(&nodes[1], 1);
7793 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7794 assert_eq!(node_txn.len(), 1);
7795 assert_eq!(node_txn[0].input.len(), 1);
7796 assert_eq!(preimage_bump.input.len(), 1);
7797 check_spends!(node_txn[0], remote_txn[0]);
7798 check_spends!(preimage_bump, remote_txn[0]);
7800 let index = preimage_bump.input[0].previous_output.vout;
7801 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
7802 let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
7803 assert!(new_feerate * 100 > feerate_timeout * 125);
7804 assert_ne!(timeout, preimage_bump.txid());
7806 let index = node_txn[0].input[0].previous_output.vout;
7807 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7808 let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
7809 assert!(new_feerate * 100 > feerate_preimage * 125);
7810 assert_ne!(preimage, node_txn[0].txid());
7815 nodes[1].node.get_and_clear_pending_events();
7816 nodes[1].node.get_and_clear_pending_msg_events();
7820 fn test_counterparty_raa_skip_no_crash() {
7821 // Previously, if our counterparty sent two RAAs in a row without us having provided a
7822 // commitment transaction, we would have happily carried on and provided them the next
7823 // commitment transaction based on one RAA forward. This would probably eventually have led to
7824 // channel closure, but it would not have resulted in funds loss. Still, our
7825 // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
7826 // check simply that the channel is closed in response to such an RAA, but don't check whether
7827 // we decide to punish our counterparty for revoking their funds (as we don't currently
7828 // do).
7829 let chanmon_cfgs = create_chanmon_cfgs(2);
7830 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7831 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7832 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7833 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7835 let per_commitment_secret;
7836 let next_per_commitment_point;
7838 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7839 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7840 let keys = guard.channel_by_id.get_mut(&channel_id).map(
7841 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
7842 ).flatten().unwrap().get_signer();
7844 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
7846 // Make signer believe we got a counterparty signature, so that it allows the revocation
7847 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7848 per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7850 // Must revoke without gaps
7851 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7852 keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7854 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7855 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7856 &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
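// (Context: a per-commitment point is simply the public key of the corresponding
// per-commitment secret, i.e. roughly
//   PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&secret).unwrap())
// as done above. Decrementing `last_holder_commitment` one step per release keeps
// the TestChannelSigner's no-gaps enforcement satisfied.)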
7859 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7860 &msgs::RevokeAndACK {
7862 per_commitment_secret,
7863 next_per_commitment_point,
7865 next_local_nonce: None,
7867 assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7868 check_added_monitors!(nodes[1], 1);
7869 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
7870 , [nodes[0].node.get_our_node_id()], 100000);
7874 fn test_bump_txn_sanitize_tracking_maps() {
7875 // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7876 // verify we clean them up right after ANTI_REORG_DELAY expires.
7878 let chanmon_cfgs = create_chanmon_cfgs(2);
7879 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7880 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7881 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7883 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7884 // Lock HTLC in both directions
7885 let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7886 let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7888 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7889 assert_eq!(revoked_local_txn[0].input.len(), 1);
7890 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7892 // Revoke local commitment tx
7893 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7895 // Broadcast set of revoked txn on A
7896 connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7897 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7898 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7900 mine_transaction(&nodes[0], &revoked_local_txn[0]);
7901 check_closed_broadcast!(nodes[0], true);
7902 check_added_monitors!(nodes[0], 1);
7903 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
7905 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7906 assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice txn * 3
7907 check_spends!(node_txn[0], revoked_local_txn[0]);
7908 check_spends!(node_txn[1], revoked_local_txn[0]);
7909 check_spends!(node_txn[2], revoked_local_txn[0]);
7910 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7914 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7915 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7917 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7918 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7919 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
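// With the claims confirmed and ANTI_REORG_DELAY - 1 further blocks connected, the
// OnchainTxHandler should have dropped both tracking maps; anything left here would
// have kept bumped justice txn regenerating forever (the pre-fix behavior).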
7924 fn test_channel_conf_timeout() {
7925 // Tests that, for inbound channels, we give up on them if the funding transaction does not
7926 // confirm within 2016 blocks, as recommended by BOLT 2.
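// (2016 blocks is roughly two weeks at one block per ten minutes; BOLT 2 recommends
// the fundee SHOULD forget the channel if the funding transaction has not confirmed
// within that window.)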
7927 let chanmon_cfgs = create_chanmon_cfgs(2);
7928 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7929 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7930 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7932 let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7934 // The outbound node should wait forever for confirmation:
7935 // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7936 // copied here instead of directly referencing the constant.
7937 connect_blocks(&nodes[0], 2016);
7938 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7940 // The inbound node should fail the channel after exactly 2016 blocks
7941 connect_blocks(&nodes[1], 2015);
7942 check_added_monitors!(nodes[1], 0);
7943 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7945 connect_blocks(&nodes[1], 1);
7946 check_added_monitors!(nodes[1], 1);
7947 check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
7948 let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7949 assert_eq!(close_ev.len(), 1);
7951 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
7952 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7953 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7955 _ => panic!("Unexpected event"),
7960 fn test_override_channel_config() {
7961 let chanmon_cfgs = create_chanmon_cfgs(2);
7962 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7963 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7964 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7966 // Node0 initiates a channel to node1 using the override config.
7967 let mut override_config = UserConfig::default();
7968 override_config.channel_handshake_config.our_to_self_delay = 200;
7970 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
7972 // Assert the channel created by node0 is using the override config.
7973 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7974 assert_eq!(res.common_fields.channel_flags, 0);
7975 assert_eq!(res.common_fields.to_self_delay, 200);
7979 fn test_override_0msat_htlc_minimum() {
7980 let mut zero_config = UserConfig::default();
7981 zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7982 let chanmon_cfgs = create_chanmon_cfgs(2);
7983 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7984 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7985 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7987 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
7988 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7989 assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7991 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7992 let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7993 assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7997 fn test_channel_update_has_correct_htlc_maximum_msat() {
7998 // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
8000 // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
8000 // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
8001 // 90% of the `channel_value`.
8002 // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
8004 let mut config_30_percent = UserConfig::default();
8005 config_30_percent.channel_handshake_config.announced_channel = true;
8006 config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
8007 let mut config_50_percent = UserConfig::default();
8008 config_50_percent.channel_handshake_config.announced_channel = true;
8009 config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
8010 let mut config_95_percent = UserConfig::default();
8011 config_95_percent.channel_handshake_config.announced_channel = true;
8012 config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
8013 let mut config_100_percent = UserConfig::default();
8014 config_100_percent.channel_handshake_config.announced_channel = true;
8015 config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
8017 let chanmon_cfgs = create_chanmon_cfgs(4);
8018 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8019 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
8020 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8022 let channel_value_satoshis = 100000;
8023 let channel_value_msat = channel_value_satoshis * 1000;
8024 let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
8025 let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
8026 let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
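// Worked out: with channel_value_satoshis = 100_000, channel_value_msat is
// 100_000_000, so the caps computed above are 30_000_000, 50_000_000, and
// 90_000_000 msat respectively (the last one being LDK's 90%-of-capacity ceiling).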
8028 let (node_0_chan_update, node_1_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
8029 let (node_2_chan_update, node_3_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
8031 // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
8032 // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
8033 assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
8034 // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
8035 // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
8036 assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
8038 // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8039 // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
8040 // `channel_value`.
8041 assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8042 // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8043 // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
8044 // `channel_value`.
8045 assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8049 fn test_manually_accept_inbound_channel_request() {
8050 let mut manually_accept_conf = UserConfig::default();
8051 manually_accept_conf.manually_accept_inbound_channels = true;
8052 let chanmon_cfgs = create_chanmon_cfgs(2);
8053 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8054 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8055 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8057 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8058 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8060 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8062 // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8063 // accepting the inbound channel request.
8064 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8066 let events = nodes[1].node.get_and_clear_pending_events();
8068 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8069 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
8071 _ => panic!("Unexpected event"),
8074 let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8075 assert_eq!(accept_msg_ev.len(), 1);
8077 match accept_msg_ev[0] {
8078 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8079 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8081 _ => panic!("Unexpected event"),
8084 nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8086 let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8087 assert_eq!(close_msg_ev.len(), 1);
8089 let events = nodes[1].node.get_and_clear_pending_events();
8091 Event::ChannelClosed { user_channel_id, .. } => {
8092 assert_eq!(user_channel_id, 23);
8094 _ => panic!("Unexpected event"),
8099 fn test_manually_reject_inbound_channel_request() {
8100 let mut manually_accept_conf = UserConfig::default();
8101 manually_accept_conf.manually_accept_inbound_channels = true;
8102 let chanmon_cfgs = create_chanmon_cfgs(2);
8103 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8104 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8105 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8107 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8108 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8110 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8112 // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8113 // rejecting the inbound channel request.
8114 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8116 let events = nodes[1].node.get_and_clear_pending_events();
8118 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8119 nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8121 _ => panic!("Unexpected event"),
8124 let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8125 assert_eq!(close_msg_ev.len(), 1);
8127 match close_msg_ev[0] {
8128 MessageSendEvent::HandleError { ref node_id, .. } => {
8129 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8131 _ => panic!("Unexpected event"),
8134 // There should be no more events to process, as the channel was never opened.
8135 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
8139 fn test_can_not_accept_inbound_channel_twice() {
8140 let mut manually_accept_conf = UserConfig::default();
8141 manually_accept_conf.manually_accept_inbound_channels = true;
8142 let chanmon_cfgs = create_chanmon_cfgs(2);
8143 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8144 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8145 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8147 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8148 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8150 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8152 // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8153 // accepting the inbound channel request.
8154 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8156 let events = nodes[1].node.get_and_clear_pending_events();
8158 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8159 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
8160 let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
8162 Err(APIError::APIMisuseError { err }) => {
8163 assert_eq!(err, "No such channel awaiting to be accepted.");
8165 Ok(_) => panic!("It shouldn't be possible to accept a channel twice"),
8166 Err(e) => panic!("Unexpected Error {:?}", e),
8169 _ => panic!("Unexpected event"),
8172 // Ensure that the channel wasn't closed after attempting to accept it twice.
8173 let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8174 assert_eq!(accept_msg_ev.len(), 1);
8176 match accept_msg_ev[0] {
8177 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8178 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8180 _ => panic!("Unexpected event"),
8185 fn test_can_not_accept_unknown_inbound_channel() {
8186 let chanmon_cfg = create_chanmon_cfgs(2);
8187 let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8188 let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8189 let nodes = create_network(2, &node_cfg, &node_chanmgr);
8191 let unknown_channel_id = ChannelId::new_zero();
8192 let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
8194 Err(APIError::APIMisuseError { err }) => {
8195 assert_eq!(err, "No such channel awaiting to be accepted.");
8197 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
8198 Err(e) => panic!("Unexpected Error: {:?}", e),
8203 fn test_onion_value_mpp_set_calculation() {
8204 // Test that we use the onion value `amt_to_forward` when
8205 // calculating whether we've reached the `total_msat` of an MPP
8206 // by having a routing node forward more than `amt_to_forward`
8207 // and checking that the receiving node doesn't generate
8208 // a PaymentClaimable event too early
8209 let node_count = 4;
8210 let chanmon_cfgs = create_chanmon_cfgs(node_count);
8211 let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8212 let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8213 let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8215 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8216 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8217 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8218 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8220 let total_msat = 100_000;
8221 let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8222 let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8223 let sample_path = route.paths.pop().unwrap();
8225 let mut path_1 = sample_path.clone();
8226 path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8227 path_1.hops[0].short_channel_id = chan_1_id;
8228 path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8229 path_1.hops[1].short_channel_id = chan_3_id;
8230 path_1.hops[1].fee_msat = 100_000;
8231 route.paths.push(path_1);
8233 let mut path_2 = sample_path.clone();
8234 path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8235 path_2.hops[0].short_channel_id = chan_2_id;
8236 path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8237 path_2.hops[1].short_channel_id = chan_4_id;
8238 path_2.hops[1].fee_msat = 1_000;
8239 route.paths.push(path_2);
8242 let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8243 let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8244 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8245 nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8246 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8247 check_added_monitors!(nodes[0], expected_paths.len());
8249 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8250 assert_eq!(events.len(), expected_paths.len());
8253 let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8254 let mut payment_event = SendEvent::from_event(ev);
8255 let mut prev_node = &nodes[0];
8257 for (idx, &node) in expected_paths[0].iter().enumerate() {
8258 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8260 if idx == 0 { // routing node
8261 let session_priv = [3; 32];
8262 let height = nodes[0].best_block_info().1;
8263 let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8264 let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8265 let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
8266 let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8267 &recipient_onion_fields, height + 1, &None).unwrap();
8268 // Edit amt_to_forward to simulate the sender having set
8269 // the final amount and the routing node taking less fee
8270 if let msgs::OutboundOnionPayload::Receive {
8271 ref mut sender_intended_htlc_amt_msat, ..
8272 } = onion_payloads[1] {
8273 *sender_intended_htlc_amt_msat = 99_000;
8275 let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8276 payment_event.msgs[0].onion_routing_packet = new_onion_packet;
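// At this point the routing node forwards a 100_000 msat HTLC while the inner onion
// claims the sender only intended 99_000 msat for this part. The recipient must count
// the onion amount, not the HTLC amount, toward `total_msat`, so no PaymentClaimable
// event may fire until the second path arrives.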
8279 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8280 check_added_monitors!(node, 0);
8281 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8282 expect_pending_htlcs_forwardable!(node);
8285 let mut events_2 = node.node.get_and_clear_pending_msg_events();
8286 assert_eq!(events_2.len(), 1);
8287 check_added_monitors!(node, 1);
8288 payment_event = SendEvent::from_event(events_2.remove(0));
8289 assert_eq!(payment_event.msgs.len(), 1);
8291 let events_2 = node.node.get_and_clear_pending_events();
8292 assert!(events_2.is_empty());
8299 let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8300 pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8302 claim_payment_along_route(
8303 ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage)
8307 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8309 let routing_node_count = msat_amounts.len();
8310 let node_count = routing_node_count + 2;
8312 let chanmon_cfgs = create_chanmon_cfgs(node_count);
8313 let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8314 let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8315 let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8317 let src_idx = 0;
8318 let dst_idx = 1;
8320 // Create channels for each amount
8321 let mut expected_paths = Vec::with_capacity(routing_node_count);
8322 let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8323 let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8324 for i in 0..routing_node_count {
8325 let routing_node = 2 + i;
8326 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8327 src_chan_ids.push(src_chan_id);
8328 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8329 dst_chan_ids.push(dst_chan_id);
8330 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8331 expected_paths.push(path);
8333 let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8335 // Create a route for each amount
8336 let example_amount = 100000;
8337 let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8338 let sample_path = route.paths.pop().unwrap();
8339 for i in 0..routing_node_count {
8340 let routing_node = 2 + i;
8341 let mut path = sample_path.clone();
8342 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8343 path.hops[0].short_channel_id = src_chan_ids[i];
8344 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8345 path.hops[1].short_channel_id = dst_chan_ids[i];
8346 path.hops[1].fee_msat = msat_amounts[i];
8347 route.paths.push(path);
8350 // Send payment with manually set total_msat
8351 let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8352 let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8353 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8354 nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8355 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8356 check_added_monitors!(nodes[src_idx], expected_paths.len());
8358 let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8359 assert_eq!(events.len(), expected_paths.len());
8360 let mut amount_received = 0;
8361 for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8362 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8364 let current_path_amount = msat_amounts[path_idx];
8365 amount_received += current_path_amount;
8366 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
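// The payment should become claimable exactly on the part that pushes the cumulative
// amount over `total_msat`; earlier parts must sit pending without generating a
// PaymentClaimable event even when individual parts overshoot their share.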
8367 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8370 claim_payment_along_route(
8371 ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage)
8376 fn test_overshoot_mpp() {
8377 do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8378 do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8382 fn test_simple_mpp() {
8383 // Simple test of sending a multi-path payment.
8384 let chanmon_cfgs = create_chanmon_cfgs(4);
8385 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8386 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8387 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8389 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8390 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8391 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8392 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8394 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8395 let path = route.paths[0].clone();
8396 route.paths.push(path);
8397 route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8398 route.paths[0].hops[0].short_channel_id = chan_1_id;
8399 route.paths[0].hops[1].short_channel_id = chan_3_id;
8400 route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8401 route.paths[1].hops[0].short_channel_id = chan_2_id;
8402 route.paths[1].hops[1].short_channel_id = chan_4_id;
8403 send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8404 claim_payment_along_route(
8405 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage)
8410 fn test_preimage_storage() {
8411 // Simple test of payment preimage storage allowing no client-side storage to claim payments
8412 let chanmon_cfgs = create_chanmon_cfgs(2);
8413 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8414 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8415 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8417 create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8420 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
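// `create_inbound_payment` hands back a (payment_hash, payment_secret) pair without
// requiring us to persist the preimage: the preimage is re-derived from the node's
// key material when the HTLC arrives, and is surfaced via
// PaymentPurpose::Bolt11InvoicePayment below.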
8421 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8422 nodes[0].node.send_payment_with_route(&route, payment_hash,
8423 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8424 check_added_monitors!(nodes[0], 1);
8425 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8426 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8427 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8428 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8430 // Note that after leaving the above scope we have no knowledge of any arguments or return
8431 // values from previous calls.
8432 expect_pending_htlcs_forwardable!(nodes[1]);
8433 let events = nodes[1].node.get_and_clear_pending_events();
8434 assert_eq!(events.len(), 1);
8436 Event::PaymentClaimable { ref purpose, .. } => {
8438 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
8439 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8441 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
8444 _ => panic!("Unexpected event"),
8449 fn test_bad_secret_hash() {
8450 // Simple test of unregistered payment hash/invalid payment secret handling
8451 let chanmon_cfgs = create_chanmon_cfgs(2);
8452 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8453 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8454 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8456 create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8458 let random_payment_hash = PaymentHash([42; 32]);
8459 let random_payment_secret = PaymentSecret([43; 32]);
8460 let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8461 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8463 // All the below cases should end up being handled exactly identically, so we macro the
8464 // resulting events.
8465 macro_rules! handle_unknown_invalid_payment_data {
8466 ($payment_hash: expr) => {
8467 check_added_monitors!(nodes[0], 1);
8468 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8469 let payment_event = SendEvent::from_event(events.pop().unwrap());
8470 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8471 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8473 // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8474 // again to process the pending backwards-failure of the HTLC
8475 expect_pending_htlcs_forwardable!(nodes[1]);
8476 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8477 check_added_monitors!(nodes[1], 1);
8479 // We should fail the payment back
8480 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8481 match events.pop().unwrap() {
8482 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8483 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8484 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8486 _ => panic!("Unexpected event"),
8491 let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8492 // Error data is the HTLC value (100,000) and current block height
8493 let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
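// Layout sketch (incorrect_or_unknown_payment_details failure data): an 8-byte
// big-endian htlc_msat -- 100_000 msat == 0x0001_86a0 -- followed by a 4-byte
// big-endian block height, here CHAN_CONFIRM_DEPTH.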
8495 // Send a payment with the right payment hash but the wrong payment secret
8496 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8497 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8498 handle_unknown_invalid_payment_data!(our_payment_hash);
8499 expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8501 // Send a payment with a random payment hash, but the right payment secret
8502 nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8503 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8504 handle_unknown_invalid_payment_data!(random_payment_hash);
8505 expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8507 // Send a payment with a random payment hash and random payment secret
8508 nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8509 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8510 handle_unknown_invalid_payment_data!(random_payment_hash);
8511 expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8515 fn test_update_err_monitor_lockdown() {
8516 // Our monitor will lock updates of the local commitment transaction once a broadcast condition
8517 // has been fulfilled (either a force-close from Channel or a block height requiring an HTLC-
8518 // timeout). Trying to update the monitor after lockdown should return a
8519 // ChannelMonitorUpdateStatus::InProgress.
8521 // This scenario may happen in a watchtower setup, where the watchtower processes a block height
8522 // triggering an HTLC timeout while a slow-block-processing ChannelManager receives a locally-signed
8523 // commitment at the same time.
8525 let chanmon_cfgs = create_chanmon_cfgs(2);
8526 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8527 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8528 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8530 // Create some initial channel
8531 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8532 let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8534 // Rebalance the network so HTLCs can be generated in both directions
8535 send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8537 // Route a HTLC from node 0 to node 1 (but don't settle)
8538 let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8540 // Copy ChainMonitor to simulate a watchtower and advance node 0's block height until its ChannelMonitor times out the HTLC onchain
8541 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8542 let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8543 let persister = test_utils::TestPersister::new();
8546 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8547 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8548 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8549 assert!(new_monitor == *monitor);
8552 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8553 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
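// The "watchtower" is just a second ChainMonitor fed a serialize/deserialize
// round-trip copy of node 0's ChannelMonitor. From here on it sees blocks
// independently of the node itself, which is what lets it hit the broadcast
// condition first.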
8556 let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8557 // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8558 // transaction lock time requirements here.
8559 chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8560 watchtower.chain_monitor.block_connected(&block, 200);
8562 // Try to update ChannelMonitor
8563 nodes[1].node.claim_funds(preimage);
8564 check_added_monitors!(nodes[1], 1);
8565 expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8567 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8568 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8569 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8571 let mut node_0_per_peer_lock;
8572 let mut node_0_peer_state_lock;
8573 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8574 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8575 assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8576 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8577 } else { assert!(false); }
8582 // Our local monitor is in-sync and hasn't yet processed the timeout
8583 check_added_monitors!(nodes[0], 1);
8584 let events = nodes[0].node.get_and_clear_pending_events();
8585 assert_eq!(events.len(), 1);
8589 fn test_concurrent_monitor_claim() {
8590 // Watchtower A receives block, broadcasts state N, then channel receives new state N+1,
8591 // sending it to both watchtowers, Bob accepts N+1, then receives block and broadcasts
8592 // the latest state N+1, Alice rejects state N+1, but Bob has already broadcast it,
8593 // state N+1 confirms. Alice claims output from state N+1.
8595 let chanmon_cfgs = create_chanmon_cfgs(2);
8596 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8597 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8598 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8600 // Create some initial channel
8601 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8602 let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8604 // Rebalance the network so HTLCs can be generated in both directions
8605 send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8607 // Route a HTLC from node 0 to node 1 (but don't settle)
8608 route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8610 // Copy ChainMonitor to simulate watchtower Alice and advance her block height until her ChannelMonitor times out the HTLC onchain
8611 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8612 let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8613 let persister = test_utils::TestPersister::new();
8614 let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8615 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8617 let watchtower_alice = {
8619 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8620 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8621 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8622 assert!(new_monitor == *monitor);
8625 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8626 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8629 let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8630 // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8631 // requirements here.
8632 const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
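// Roughly: the channel confirms, one block passes, the HTLC's CLTV expires, and the
// grace period we allow the counterparty elapses; at this height the monitor should
// go to chain with the commitment and HTLC-timeout transactions.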
8633 alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8634 watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8636 // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8638 let mut txn = alice_broadcaster.txn_broadcast();
8639 assert_eq!(txn.len(), 2);
8640 check_spends!(txn[0], chan_1.3);
8641 check_spends!(txn[1], txn[0]);
8644 // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8645 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8646 let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8647 let persister = test_utils::TestPersister::new();
8648 let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8649 let watchtower_bob = {
8651 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8652 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8653 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8654 assert!(new_monitor == *monitor);
8657 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8658 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8661 watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8663 // Route another payment to generate another update while the previous HTLC is still pending
8664 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8665 nodes[1].node.send_payment_with_route(&route, payment_hash,
8666 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8667 check_added_monitors!(nodes[1], 1);
8669 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8670 assert_eq!(updates.update_add_htlcs.len(), 1);
8671 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8673 let mut node_0_per_peer_lock;
8674 let mut node_0_peer_state_lock;
8675 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8676 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8677 // Watchtower Alice should already have seen the block and reject the update
8678 assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8679 assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8680 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8681 } else { assert!(false); }
8686 // Our local monitor is in-sync and hasn't yet processed the timeout
8687 check_added_monitors!(nodes[0], 1);
8689 // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8690 watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8692 // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8695 let mut txn = bob_broadcaster.txn_broadcast();
8696 assert_eq!(txn.len(), 2);
8697 bob_state_y = txn.remove(0);
8700 // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
8701 let height = HTLC_TIMEOUT_BROADCAST + 1;
8702 connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8703 check_closed_broadcast(&nodes[0], 1, true);
8704 check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
8705 [nodes[1].node.get_our_node_id()], 100000);
8706 watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8707 check_added_monitors(&nodes[0], 1);
8709 let htlc_txn = alice_broadcaster.txn_broadcast();
8710 assert_eq!(htlc_txn.len(), 1);
8711 check_spends!(htlc_txn[0], bob_state_y);
8716 fn test_pre_lockin_no_chan_closed_update() {
8717 // Test that if a peer closes a channel in response to a funding_created message we don't
8718 // generate a channel update (as the channel cannot appear on chain without a funding_signed
8719 // message).
8721 // Doing so would imply a channel monitor update before the initial channel monitor
8722 // registration, violating our API guarantees.
8724 // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8725 // then opening a second channel with the same funding output as the first (which is not
8726 // rejected because the first channel does not exist in the ChannelManager) and closing it
8727 // before receiving funding_signed.
8728 let chanmon_cfgs = create_chanmon_cfgs(2);
8729 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8730 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8731 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8733 // Create an initial channel
8734 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8735 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8736 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8737 let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8738 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8740 // Move the first channel through the funding flow...
8741 let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8743 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8744 check_added_monitors!(nodes[0], 0);
8746 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8747 let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
8748 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8749 assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8750 check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
		[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
8755 fn test_htlc_no_detection() {
	// This test is a mutation to underscore the detection logic bug we had
	// before #653. The HTLC value routed is above the remaining balance, thus
	// inverting the HTLC and `to_remote` outputs. The HTLC will come second and
	// wouldn't be seen by pre-#653 detection as we were enumerate()'ing
	// on a watched outputs vector (Vec<TxOut>), thus implicitly relying on
	// output order for correctly filtering spending children.
8763 let chanmon_cfgs = create_chanmon_cfgs(2);
8764 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8765 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8766 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8768 // Create some initial channels
8769 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8771 send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8772 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8773 let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8774 assert_eq!(local_txn[0].input.len(), 1);
8775 assert_eq!(local_txn[0].output.len(), 3);
8776 check_spends!(local_txn[0], chan_1.3);
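	// Sanity-check the setup: the 2_000_000 msat HTLC exceeds nodes[1]'s 1_000_000 msat
	// balance, so ascending value ordering places the HTLC output after `to_remote` - the
	// layout that pre-#653 detection mishandled (see the comment at the top of this test).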
	// Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
8779 let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8780 connect_block(&nodes[0], &block);
	// We deliberately connect the local tx twice, as doing so provoked a failure in this
	// test prior to the #653 fix.
8783 chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8784 check_closed_broadcast!(nodes[0], true);
8785 check_added_monitors!(nodes[0], 1);
8786 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
8787 connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8789 let htlc_timeout = {
8790 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8791 assert_eq!(node_txn.len(), 1);
8792 assert_eq!(node_txn[0].input.len(), 1);
8793 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
		check_spends!(node_txn[0], local_txn[0]);
		node_txn[0].clone()
	};
8798 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8799 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
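	// One confirmation of the HTLC-timeout plus ANTI_REORG_DELAY - 1 further blocks reaches
	// the depth at which the monitor considers the resolution final and fails the payment
	// backwards.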
	expect_payment_failed!(nodes[0], our_payment_hash, false);
}

8803 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8804 // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8805 // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8806 // Carol, Alice would be the upstream node, and Carol the downstream.)
8808 // Steps of the test:
8809 // 1) Alice sends a HTLC to Carol through Bob.
8810 // 2) Carol doesn't settle the HTLC.
8811 // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
8812 // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
	// 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
	//    but can't be claimed yet, as Bob doesn't know the preimage.
	// 5) Carol releases the preimage to Bob off-chain.
8816 // 6) Bob claims the offered output on the broadcasted commitment.
8817 let chanmon_cfgs = create_chanmon_cfgs(3);
8818 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8819 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8820 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8822 // Create some initial channels
8823 let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8824 create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8826 // Steps (1) and (2):
8827 // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8828 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8830 // Check that Alice's commitment transaction now contains an output for this HTLC.
8831 let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8832 check_spends!(alice_txn[0], chan_ab.3);
8833 assert_eq!(alice_txn[0].output.len(), 2);
8834 check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8835 assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8836 assert_eq!(alice_txn.len(), 2);
8838 // Steps (3) and (4):
	// If `go_onchain_before_fulfill` is set, broadcast the relevant commitment transaction and check that Bob
	// responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8841 let mut force_closing_node = 0; // Alice force-closes
8842 let mut counterparty_node = 1; // Bob if Alice force-closes
8845 if !broadcast_alice {
8846 force_closing_node = 1;
		counterparty_node = 0;
	}
8849 nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8850 check_closed_broadcast!(nodes[force_closing_node], true);
8851 check_added_monitors!(nodes[force_closing_node], 1);
8852 check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
8853 if go_onchain_before_fulfill {
8854 let txn_to_broadcast = match broadcast_alice {
8855 true => alice_txn.clone(),
			false => get_local_commitment_txn!(nodes[1], chan_ab.2)
		};
8858 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8859 if broadcast_alice {
8860 check_closed_broadcast!(nodes[1], true);
8861 check_added_monitors!(nodes[1], 1);
			check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
		}
	}

	// Step (5):
8867 // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8868 // process of removing the HTLC from their commitment transactions.
8869 nodes[2].node.claim_funds(payment_preimage);
8870 check_added_monitors!(nodes[2], 1);
8871 expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8873 let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8874 assert!(carol_updates.update_add_htlcs.is_empty());
8875 assert!(carol_updates.update_fail_htlcs.is_empty());
8876 assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8877 assert!(carol_updates.update_fee.is_none());
8878 assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8880 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8881 let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
8882 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
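	// (The `None` expected fee above encodes that once the claim went on-chain nodes[1]
	// cannot attribute a concrete forwarding fee to this payment; an off-chain settlement
	// earns the expected 1000 msat.)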
8883 // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8884 if !go_onchain_before_fulfill && broadcast_alice {
8885 let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}
	}
8894 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
	// One monitor update for the preimage to update the Bob<->Alice channel, and one monitor
	// update for Carol<->Bob's updated commitment transaction info.
8897 check_added_monitors!(nodes[1], 2);
8899 let events = nodes[1].node.get_and_clear_pending_msg_events();
8900 assert_eq!(events.len(), 2);
8901 let bob_revocation = match events[0] {
8902 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
			assert_eq!(*node_id, nodes[2].node.get_our_node_id());
			(*msg).clone()
		},
		_ => panic!("Unexpected event"),
	};
8908 let bob_updates = match events[1] {
8909 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
			assert_eq!(*node_id, nodes[2].node.get_our_node_id());
			(*updates).clone()
		},
		_ => panic!("Unexpected event"),
	};
8916 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8917 check_added_monitors!(nodes[2], 1);
8918 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8919 check_added_monitors!(nodes[2], 1);
8921 let events = nodes[2].node.get_and_clear_pending_msg_events();
8922 assert_eq!(events.len(), 1);
8923 let carol_revocation = match events[0] {
8924 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
			(*msg).clone()
		},
		_ => panic!("Unexpected event"),
	};
8930 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8931 check_added_monitors!(nodes[1], 1);
8933 // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8934 // here's where we put said channel's commitment tx on-chain.
8935 let mut txn_to_broadcast = alice_txn.clone();
8936 if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8937 if !go_onchain_before_fulfill {
8938 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8939 // If Bob was the one to force-close, he will have already passed these checks earlier.
8940 if broadcast_alice {
8941 check_closed_broadcast!(nodes[1], true);
8942 check_added_monitors!(nodes[1], 1);
			check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
		}
8945 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8946 if broadcast_alice {
8947 assert_eq!(bob_txn.len(), 1);
			check_spends!(bob_txn[0], txn_to_broadcast[0]);
		} else {
			if nodes[1].connect_style.borrow().updates_best_block_first() {
				assert_eq!(bob_txn.len(), 3);
				assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
			} else {
				assert_eq!(bob_txn.len(), 2);
			}
			check_spends!(bob_txn[0], chan_ab.3);
		}
	}

8961 // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8962 // broadcasted commitment transaction.
8964 let script_weight = match broadcast_alice {
8965 true => OFFERED_HTLC_SCRIPT_WEIGHT,
		false => ACCEPTED_HTLC_SCRIPT_WEIGHT
	};
8968 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8969 // Bob force-closed and broadcasts the commitment transaction along with a
8970 // HTLC-output-claiming transaction.
8971 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8972 if broadcast_alice {
8973 assert_eq!(bob_txn.len(), 1);
8974 check_spends!(bob_txn[0], txn_to_broadcast[0]);
		assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
	} else {
		assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
8978 let htlc_tx = bob_txn.pop().unwrap();
8979 check_spends!(htlc_tx, txn_to_broadcast[0]);
		assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
	}
}

#[test]
8986 fn test_onchain_htlc_settlement_after_close() {
8987 do_test_onchain_htlc_settlement_after_close(true, true);
8988 do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
8989 do_test_onchain_htlc_settlement_after_close(true, false);
	do_test_onchain_htlc_settlement_after_close(false, false);
}

#[test]
8994 fn test_duplicate_temporary_channel_id_from_different_peers() {
8995 // Tests that we can accept two different `OpenChannel` requests with the same
8996 // `temporary_channel_id`, as long as they are from different peers.
8997 let chanmon_cfgs = create_chanmon_cfgs(3);
8998 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8999 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9000 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	// Create the first channel
9003 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9004 let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
	// Create a second channel
9007 nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
9008 let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
9010 // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
9011 // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
9012 open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
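	// `temporary_channel_id`s are only required to be unique per peer, so a collision across
	// two different peers (as forced here) must be accepted and tracked per counterparty.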
9014 // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
9015 // `temporary_channel_id` as they are from different peers.
9016 nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::SendAcceptChannel { node_id, msg } => {
			assert_eq!(node_id, &nodes[1].node.get_our_node_id());
			assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
		},
		_ => panic!("Unexpected event"),
	}
9029 nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::SendAcceptChannel { node_id, msg } => {
			assert_eq!(node_id, &nodes[2].node.get_our_node_id());
			assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
		},
		_ => panic!("Unexpected event"),
	}
}

#[test]
9044 fn test_peer_funding_sidechannel() {
9045 // Test that if a peer somehow learns which txid we'll use for our channel funding before we
9046 // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously
9047 // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
9048 // the txid and panicked if the peer tried to open a redundant channel to us with the same
9049 // funding outpoint.
9051 // While this assumption is generally safe, some users may have out-of-band protocols where
9052 // they notify their LSP about a funding outpoint first, or this may be violated in the future
9053 // with collaborative transaction construction protocols, i.e. dual-funding.
9054 let chanmon_cfgs = create_chanmon_cfgs(3);
9055 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9056 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9057 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9059 let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9060 let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
9062 let (_, tx, funding_output) =
9063 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9065 let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
9066 assert_eq!(cs_funding_events.len(), 1);
9067 match cs_funding_events[0] {
9068 Event::FundingGenerationReady { .. } => {}
		_ => panic!("Unexpected event {:?}", cs_funding_events),
	}
9072 nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
9073 let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
9074 nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9075 get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
9076 expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
9077 check_added_monitors!(nodes[0], 1);
9079 let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
9080 let err_msg = format!("{:?}", res.unwrap_err());
9081 assert!(err_msg.contains("An existing channel using outpoint "));
9082 assert!(err_msg.contains(" is open with peer"));
9083 // Even though the last funding_transaction_generated errored, it still generated a
9084 // SendFundingCreated. However, when the peer responds with a funding_signed it will send the
9085 // appropriate error message.
9086 let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9087 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
9088 check_added_monitors!(nodes[1], 1);
9089 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9090 let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
9091 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
9093 let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9094 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
9099 fn test_duplicate_conflicting_funding_from_second_peer() {
9100 // Test that if a user tries to fund a channel with a funding outpoint they'd previously used
9101 // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
9102 // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks
9103 // implies the user (and our counterparty) has reused cryptographic keys across channels, which
9104 // we require the user not do.
9105 let chanmon_cfgs = create_chanmon_cfgs(4);
9106 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9107 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9108 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9110 let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9112 let (_, tx, funding_output) =
9113 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9115 // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
9116 // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
9117 let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
9118 let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
9119 nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
9121 nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9123 let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9124 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9125 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9126 check_added_monitors!(nodes[1], 1);
9127 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9129 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9130 // At this point, the channel should be closed, after having generated one monitor write (the
9131 // watch_channel call which failed), but zero monitor updates.
9132 check_added_monitors!(nodes[0], 1);
9133 get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9134 let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
	check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
}

#[test]
9139 fn test_duplicate_funding_err_in_funding() {
9140 // Test that if we have a live channel with one peer, then another peer comes along and tries
9141 // to create a second channel with the same txid we'll fail and not overwrite the
9142 // outpoint_to_peer map in `ChannelManager`.
9144 // This was previously broken.
9145 let chanmon_cfgs = create_chanmon_cfgs(3);
9146 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9147 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9148 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9150 let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
9151 let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
9152 assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
9154 nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
9155 let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9156 let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
9157 open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
9158 nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
9159 let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
9160 accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
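	// Restore nodes[2]'s original temporary_channel_id on the accept_channel so nodes[2] can
	// match it against its own pending channel; only nodes[1]'s view uses the colliding id.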
9161 nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
9163 // Now that we have a second channel with the same funding txo, send a bogus funding message
9164 // and let nodes[1] remove the inbound channel.
9165 let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
9167 nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
9169 let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9170 funding_created_msg.temporary_channel_id = real_channel_id;
9171 // Make the signature invalid by changing the funding output
9172 funding_created_msg.funding_output_index += 10;
9173 nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9174 get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
9175 let err = "Invalid funding_created signature from peer".to_owned();
9176 let reason = ClosureReason::ProcessingError { err };
9177 let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
9178 check_closed_events(&nodes[1], &[expected_closing]);
	assert_eq!(
		*nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
		nodes[0].node.get_our_node_id()
	);
}

#[test]
9187 fn test_duplicate_chan_id() {
9188 // Test that if a given peer tries to open a channel with the same channel_id as one that is
9189 // already open we reject it and keep the old channel.
9191 // Previously, full_stack_target managed to figure out that if you tried to open two channels
9192 // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
9193 // the existing channel when we detect the duplicate new channel, screwing up our monitor
9194 // updating logic for the existing channel.
9195 let chanmon_cfgs = create_chanmon_cfgs(2);
9196 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9197 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9198 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9200 // Create an initial channel
9201 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9202 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9203 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9204 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9206 // Try to create a second channel with the same temporary_channel_id as the first and check
9207 // that it is rejected.
9208 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9210 let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
			// Technically, at this point, nodes[1] would be justified in thinking both the
			// first (valid) and second (invalid) channels are closed, given they both have
			// the same non-temporary channel_id. However, currently we do not, so we just
			// move forward with it.
			assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
			assert_eq!(node_id, nodes[0].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}
9225 // Move the first channel through the funding flow...
9226 let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9228 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9229 check_added_monitors!(nodes[0], 0);
9231 let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9232 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9234 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9235 assert_eq!(added_monitors.len(), 1);
9236 assert_eq!(added_monitors[0].0, funding_output);
9237 added_monitors.clear();
9239 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9241 let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9243 let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
9244 let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
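	// (Per BOLT 2, the v1 channel_id is the funding txid with its final two bytes XORed with
	// the funding output index, which is what `v1_from_funding_outpoint` computes.)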
	// Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
	// temporary one).

9249 // First try to open a second channel with a temporary channel id equal to the txid-based one.
9250 // Technically this is allowed by the spec, but we don't support it and there's little reason
9251 // to. Still, it shouldn't cause any other issues.
9252 open_chan_msg.common_fields.temporary_channel_id = channel_id;
9253 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9255 let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
			// Technically, at this point, nodes[1] would be justified in thinking both
			// channels are closed, but currently we do not, so we just move forward with it.
			assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
			assert_eq!(node_id, nodes[0].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}
9268 // Now try to create a second channel which has a duplicate funding output.
9269 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9270 let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9271 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
9272 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9273 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
9275 let funding_created = {
9276 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9277 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		// Once we call `get_funding_created` the channel shares a channel_id with another
		// channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
		// try to create another channel. Instead, we drop the channel entirely here (leaving the
		// ChannelManager in a possibly-nonsense state instead).
9282 match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
9283 ChannelPhase::UnfundedOutboundV1(mut chan) => {
9284 let logger = test_utils::TestLogger::new();
				chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
			},
			_ => panic!("Unexpected ChannelPhase variant"),
		}
	};
9290 check_added_monitors!(nodes[0], 0);
9291 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9292 // At this point we'll look up if the channel_id is present and immediately fail the channel
9293 // without trying to persist the `ChannelMonitor`.
9294 check_added_monitors!(nodes[1], 0);
9296 check_closed_events(&nodes[1], &[
9297 ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
			err: "Already had channel with the new channel_id".to_owned()
		})
	]);
9302 // ...still, nodes[1] will reject the duplicate channel.
9304 let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
			// Technically, at this point, nodes[1] would be justified in thinking both
			// channels are closed, but currently we do not, so we just move forward with it.
			assert_eq!(msg.channel_id, channel_id);
			assert_eq!(node_id, nodes[0].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}
	// Finally, finish creating the original channel and send a payment over it to make sure
	// everything is functional.
9319 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9321 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9322 assert_eq!(added_monitors.len(), 1);
9323 assert_eq!(added_monitors[0].0, funding_output);
9324 added_monitors.clear();
9326 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9328 let events_4 = nodes[0].node.get_and_clear_pending_events();
9329 assert_eq!(events_4.len(), 0);
9330 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9331 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9333 let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9334 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9335 update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
	send_payment(&nodes[0], &[&nodes[1]], 8000000);
}

#[test]
9341 fn test_error_chans_closed() {
9342 // Test that we properly handle error messages, closing appropriate channels.
9344 // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
	// peer. The "real" fix for that is to index channels with peer ids, however in the meantime
9346 // we can test various edge cases around it to ensure we don't regress.
9347 let chanmon_cfgs = create_chanmon_cfgs(3);
9348 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9349 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9350 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9352 // Create some initial channels
9353 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9354 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9355 let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
9357 assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9358 assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9359 assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9361 // Closing a channel from a different peer has no effect
9362 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9363 assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9365 // Closing one channel doesn't impact others
9366 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9367 check_added_monitors!(nodes[0], 1);
9368 check_closed_broadcast!(nodes[0], false);
9369 check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9370 [nodes[1].node.get_our_node_id()], 100000);
9371 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9372 assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9373 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9374 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
	// A null channel ID should close all channels with the given peer
9377 let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9378 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
9379 check_added_monitors!(nodes[0], 2);
9380 check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9381 [nodes[1].node.get_our_node_id(); 2], 100000);
9382 let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			assert_eq!(msg.contents.flags & 2, 2);
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			assert_eq!(msg.contents.flags & 2, 2);
		},
		_ => panic!("Unexpected event"),
	}
9396 // Note that at this point users of a standard PeerHandler will end up calling
9397 // peer_disconnected.
9398 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9399 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9401 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9402 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
}

#[test]
9407 fn test_invalid_funding_tx() {
9408 // Test that we properly handle invalid funding transactions sent to us from a peer.
9410 // Previously, all other major lightning implementations had failed to properly sanitize
9411 // funding transactions from their counterparties, leading to a multi-implementation critical
9412 // security vulnerability (though we always sanitized properly, we've previously had
9413 // un-released crashes in the sanitization process).
9415 // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9416 // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9417 // gave up on it. We test this here by generating such a transaction.
9418 let chanmon_cfgs = create_chanmon_cfgs(2);
9419 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9420 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9421 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9423 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
9424 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9425 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9427 let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9429 // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
9430 // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
	// a panic as we'd try to extract a 32 byte preimage from a witness element without checking
	// its length.
	let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9434 let wit_program_script: ScriptBuf = wit_program.into();
9435 for output in tx.output.iter_mut() {
9436 // Make the confirmed funding transaction have a bogus script_pubkey
		output.script_pubkey = ScriptBuf::new_v0_p2wsh(&wit_program_script.wscript_hash());
	}
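	// Note the `_unchecked` variant below: the checked `funding_transaction_generated` would
	// reject a funding transaction whose output script doesn't match the channel's expected
	// script, which is exactly the mismatch this test needs to let through.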
9440 nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9441 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9442 check_added_monitors!(nodes[1], 1);
9443 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9445 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9446 check_added_monitors!(nodes[0], 1);
9447 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9449 let events_1 = nodes[0].node.get_and_clear_pending_events();
9450 assert_eq!(events_1.len(), 0);
9452 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9453 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9454 nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9456 let expected_err = "funding tx had wrong script/value or output index";
9457 confirm_transaction_at(&nodes[1], &tx, 1);
9458 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
9459 [nodes[0].node.get_our_node_id()], 100000);
9460 check_added_monitors!(nodes[1], 1);
9461 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9462 assert_eq!(events_2.len(), 1);
9463 if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9464 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9465 if let msgs::ErrorAction::DisconnectPeer { msg } = action {
9466 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
9467 } else { panic!(); }
9468 } else { panic!(); }
9469 assert_eq!(nodes[1].node.list_channels().len(), 0);
	// Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
	// long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
	// as it's not 32 bytes long.
9474 let mut spend_tx = Transaction {
9475 version: 2i32, lock_time: LockTime::ZERO,
9476 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
			previous_output: BitcoinOutPoint {
				txid: tx.txid(),
				vout: idx as u32,
			},
			script_sig: ScriptBuf::new(),
			sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
			witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
		}).collect(),
		output: vec![TxOut {
			value: 1000,
			script_pubkey: ScriptBuf::new(),
		}],
	};
9490 check_spends!(spend_tx, tx);
	mine_transaction(&nodes[1], &spend_tx);
}

#[test]
9495 fn test_coinbase_funding_tx() {
9496 // Miners are able to fund channels directly from coinbase transactions, however
9497 // by consensus rules, outputs of a coinbase transaction are encumbered by a 100
9498 // block maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
9499 // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
9501 // Note that 0conf channels with coinbase funding transactions are unaffected and are
9502 // immediately operational after opening.
9503 let chanmon_cfgs = create_chanmon_cfgs(2);
9504 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9505 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9506 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9508 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9509 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9511 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9512 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9514 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9516 // Create the coinbase funding transaction.
9517 let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9519 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9520 check_added_monitors!(nodes[0], 0);
9521 let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9523 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9524 check_added_monitors!(nodes[1], 1);
9525 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9527 let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9529 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9530 check_added_monitors!(nodes[0], 1);
9532 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9533 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
9535 // Starting at height 0, we "confirm" the coinbase at height 1.
9536 confirm_transaction_at(&nodes[0], &tx, 1);
9537 // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
9538 connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
9539 // Check that we have no pending message events (we have not queued a `channel_ready` yet).
9540 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
9541 // Now connect one more block which results in 100 confirmations of the coinbase transaction.
9542 connect_blocks(&nodes[0], 1);
9543 // There should now be a `channel_ready` which can be handled.
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
9546 confirm_transaction_at(&nodes[1], &tx, 1);
9547 connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
9548 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
9549 connect_blocks(&nodes[1], 1);
9550 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
	create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
}

9554 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9555 // In the first version of the chain::Confirm interface, after a refactor was made to not
9556 // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9557 // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9558 // `best_block_updated` is at height N, and a transaction output which we wish to spend at
9559 // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
9560 // spending transaction until height N+1 (or greater). This was due to the way
9561 // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9562 // spending transaction at the height the input transaction was confirmed at, not whether we
9563 // should broadcast a spending transaction at the current height.
9564 // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9565 // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9566 // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
9567 // until we learned about an additional block.
9569 // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9570 // aren't broadcasting transactions too early (ie not broadcasting them at all).
9571 let chanmon_cfgs = create_chanmon_cfgs(3);
9572 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9573 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9574 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9575 *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9577 create_announced_chan_between_nodes(&nodes, 0, 1);
9578 let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9579 let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9580 nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9581 nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9583 nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9584 check_closed_broadcast!(nodes[1], true);
9585 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
9586 check_added_monitors!(nodes[1], 1);
9587 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9588 assert_eq!(node_txn.len(), 1);
9590 let conf_height = nodes[1].best_block_info().1;
9591 if !test_height_before_timelock {
		connect_blocks(&nodes[1], 24 * 6);
	}
9594 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9595 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
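	// Note that `transactions_confirmed` informs the monitor of a confirmation at a *past*
	// height without moving the best block forward - exactly the "skipped blocks" situation
	// described above, where timelocks may already be expired at the current tip.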
9596 if test_height_before_timelock {
9597 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9598 // generate any events or broadcast any transactions
9599 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
		assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
	} else {
9602 // We should broadcast an HTLC transaction spending our funding transaction first
9603 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9604 assert_eq!(spending_txn.len(), 2);
		let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
			&spending_txn[1]
		} else {
			&spending_txn[0]
		};
9610 check_spends!(htlc_tx, node_txn[0]);
		// We should also generate a SpendableOutputs event with the to_self output (as its
		// timelock is up).
9613 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9614 assert_eq!(descriptor_spend_txn.len(), 1);
9616 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9617 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9618 // additional block built on top of the current chain.
9619 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9620 &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
9621 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9622 check_added_monitors!(nodes[1], 1);
9624 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9625 assert!(updates.update_add_htlcs.is_empty());
9626 assert!(updates.update_fulfill_htlcs.is_empty());
9627 assert_eq!(updates.update_fail_htlcs.len(), 1);
9628 assert!(updates.update_fail_malformed_htlcs.is_empty());
9629 assert!(updates.update_fee.is_none());
9630 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9631 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
		expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
	}
}

#[test]
9637 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9638 do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
	do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
}

9642 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9643 let chanmon_cfgs = create_chanmon_cfgs(2);
9644 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9645 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9646 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9648 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9650 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9651 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
9652 let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
9654 let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9657 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9658 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9659 check_added_monitors!(nodes[0], 1);
9660 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9661 assert_eq!(events.len(), 1);
9662 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9663 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9664 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9666 expect_pending_htlcs_forwardable!(nodes[1]);
9667 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
	// Note that we use a different PaymentId here to allow us to duplicatively pay
9671 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9672 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9673 check_added_monitors!(nodes[0], 1);
9674 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9675 assert_eq!(events.len(), 1);
9676 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9677 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9678 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9679 // At this point, nodes[1] would notice it has too much value for the payment. It will
9680 // assume the second is a privacy attack (no longer particularly relevant
9681 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9682 // the first HTLC delivered above.
9685 expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9686 nodes[1].node.process_pending_htlc_forwards();
9688 if test_for_second_fail_panic {
9689 // Now we go fail back the first HTLC from the user end.
9690 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9692 let expected_destinations = vec![
9693 HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
			HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
		];
9696 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9697 nodes[1].node.process_pending_htlc_forwards();
9699 check_added_monitors!(nodes[1], 1);
9700 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9701 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9703 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9704 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9705 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9707 let failure_events = nodes[0].node.get_and_clear_pending_events();
9708 assert_eq!(failure_events.len(), 4);
9709 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9710 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9711 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
		if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
	} else {
9714 // Let the second HTLC fail and claim the first
9715 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9716 nodes[1].node.process_pending_htlc_forwards();
9718 check_added_monitors!(nodes[1], 1);
9719 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9720 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9721 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9723 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
		claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
	}
}

#[test]
9730 fn test_dup_htlc_second_fail_panic() {
9731 // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9732 // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9733 // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9734 // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
	do_test_dup_htlc_second_rejected(true);
}

#[test]
9739 fn test_dup_htlc_second_rejected() {
9740 // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
9741 // simply reject the second HTLC but are still able to claim the first HTLC.
	do_test_dup_htlc_second_rejected(false);
}

#[test]
9746 fn test_inconsistent_mpp_params() {
	// Test that if we receive two HTLCs with different payment parameters we fail back the first
9748 // such HTLC and allow the second to stay.
9749 let chanmon_cfgs = create_chanmon_cfgs(4);
9750 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9751 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9752 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9754 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9755 create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9756 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9757 let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9759 let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9760 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
9761 let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
9762 assert_eq!(route.paths.len(), 2);
9763 route.paths.sort_by(|path_a, _| {
9764 // Sort the path so that the path through nodes[1] comes first
9765 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9766 core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9769 let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9771 let cur_height = nodes[0].best_block_info().1;
9772 let payment_id = PaymentId([42; 32]);
9774 let session_privs = {
9775 // We create a fake route here so that we start with three pending HTLCs, which we'll
9776 // ultimately have, just not right away.
9777 let mut dup_route = route.clone();
9778 dup_route.paths.push(route.paths[1].clone());
9779 nodes[0].node.test_add_new_pending_payment(our_payment_hash,
9780 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
9782 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
9783 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9784 &None, session_privs[0]).unwrap();
9785 check_added_monitors!(nodes[0], 1);
9788 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9789 assert_eq!(events.len(), 1);
9790 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9792 assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9794 nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9795 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9796 check_added_monitors!(nodes[0], 1);
9799 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9800 assert_eq!(events.len(), 1);
9801 let payment_event = SendEvent::from_event(events.pop().unwrap());
9803 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9804 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9806 expect_pending_htlcs_forwardable!(nodes[2]);
9807 check_added_monitors!(nodes[2], 1);
9809 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9810 assert_eq!(events.len(), 1);
9811 let payment_event = SendEvent::from_event(events.pop().unwrap());
9813 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9814 check_added_monitors!(nodes[3], 0);
9815 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9817 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9818 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9819 // post-payment_secrets) and fail back the new HTLC.
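// Each part of an MPP payment commits to the same `total_msat` in its onion; this second part
// advertises a different total (14M vs 15M msat), so nodes[3] treats it as inconsistent and fails
// back only the new HTLC.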
9821 expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9822 nodes[3].node.process_pending_htlc_forwards();
9823 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9824 nodes[3].node.process_pending_htlc_forwards();
9826 check_added_monitors!(nodes[3], 1);
9828 let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9829 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9830 commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9832 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9833 check_added_monitors!(nodes[2], 1);
9835 let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9836 nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9837 commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9839 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9841 nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9842 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9843 &None, session_privs[2]).unwrap();
9844 check_added_monitors!(nodes[0], 1);
9846 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9847 assert_eq!(events.len(), 1);
9848 pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9850 do_claim_payment_along_route(
9851 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage)
9853 expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
9857 fn test_double_partial_claim() {
9858 // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9859 // time out, the sender resends only some of the MPP parts, then the user processes the
9860 // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment.
9862 let chanmon_cfgs = create_chanmon_cfgs(4);
9863 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9864 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9865 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9867 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9868 create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9869 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9870 create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9872 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9873 assert_eq!(route.paths.len(), 2);
9874 route.paths.sort_by(|path_a, _| {
9875 // Sort the path so that the path through nodes[1] comes first
9876 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9877 core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9880 send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9881 // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9882 // amount of time to respond to.
9884 // Connect some blocks to time out the payment
9885 connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9886 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9888 let failed_destinations = vec![
9889 HTLCDestination::FailedPayment { payment_hash },
9890 HTLCDestination::FailedPayment { payment_hash },
9892 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9894 pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
9896 // nodes[0] now retries one of the two paths...
9897 nodes[0].node.send_payment_with_route(&route, payment_hash,
9898 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9899 check_added_monitors!(nodes[0], 2);
9901 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9902 assert_eq!(events.len(), 2);
9903 let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9904 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9906 // At this point nodes[3] has received one half of the payment, and the user goes to handle
9907 // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9908 nodes[3].node.claim_funds(payment_preimage);
9909 check_added_monitors!(nodes[3], 0);
9910 assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
9913 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9914 #[derive(Clone, Copy, PartialEq)]
9915 enum ExposureEvent {
9916 /// Breach occurs at HTLC forwarding (see `send_htlc`)
9918 /// Breach occurs at HTLC reception (see `update_add_htlc`)
9920 /// Breach occurs at outbound update_fee (see `send_update_fee`)
9921 AtUpdateFeeOutbound,
9924 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
9925 // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
9928 // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
9929 // trimmed-to-dust HTLC outbound balance and this new payment as included on next
9930 // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
9931 // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
9932 // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
9933 // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
9934 // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel
9935 // might be available again for HTLC processing once the dust bandwidth has cleared up.
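// Background: an HTLC is trimmed-to-dust when its value, minus the fee of its second-stage HTLC
// transaction at the (buffered) feerate, falls below the commitment's `dust_limit_satoshis`. Such
// HTLCs get no output on the commitment transaction, so their full value is burned to fees if the
// channel closes; `max_dust_htlc_exposure_msat` caps the aggregate value at risk this way.
// Roughly: dust threshold (sats) ~= dust_limit_satoshis + feerate_per_kw * htlc_tx_weight / 1000.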
9937 let chanmon_cfgs = create_chanmon_cfgs(2);
9938 let mut config = test_default_channel_config();
9940 // We hard-code the feerate values here, but they're re-calculated further down and asserted.
9941 // If the values below ever change, these constants should simply be updated.
9942 const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
9943 let nondust_htlc_count_in_limit =
9944 if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9945 AT_FEE_OUTBOUND_HTLCS
9947 let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
9948 let expected_dust_buffer_feerate = initial_feerate + 2530;
9949 let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
9950 commitment_tx_cost +=
9952 htlc_success_tx_weight(&ChannelTypeFeatures::empty())
9954 htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
9955 } * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
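// `commitment_tx_cost` approximates the extra commitment and second-stage HTLC transaction fees
// paid above the 253 sat/kw floor for the non-dust HTLCs in the limit. Because non-dust HTLC fees
// also count toward dust exposure (see `test_nondust_htlc_fees_are_dust` below), it is added to the
// configured exposure limit so the dust budget stays comparable across feerates.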
9957 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9958 *feerate_lock = initial_feerate;
9960 config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9961 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9962 // to get roughly the same initial value as the default setting when this test was
9963 // originally written.
9964 MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
9965 } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
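// `FixedLimitMsat` caps dust exposure at an absolute msat amount, while `FeeRateMultiplier` scales
// the cap with the current feerate estimate. Illustrative values only, a user config might set
// either variant:
//   config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000);
//   config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(10_000);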
9966 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9967 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9968 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9970 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
9971 let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9972 open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
9973 open_channel.common_fields.max_accepted_htlcs = 60;
9975 open_channel.common_fields.dust_limit_satoshis = 546;
9977 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9978 let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9979 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9981 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9983 let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9986 let mut node_0_per_peer_lock;
9987 let mut node_0_peer_state_lock;
9988 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
9989 ChannelPhase::UnfundedOutboundV1(chan) => {
9990 chan.context.holder_dust_limit_satoshis = 546;
9992 _ => panic!("Unexpected ChannelPhase variant"),
9996 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9997 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9998 check_added_monitors!(nodes[1], 1);
9999 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10001 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
10002 check_added_monitors!(nodes[0], 1);
10003 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
10005 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
10006 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
10007 update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
10010 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10011 *feerate_lock = 253;
10014 // Fetch a route in advance, as we won't be able to once we're unable to send.
10015 let (mut route, payment_hash, _, payment_secret) =
10016 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
10018 let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
10019 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10020 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10021 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
10022 (chan.context().get_dust_buffer_feerate(None) as u64,
10023 chan.context().get_max_dust_htlc_exposure_msat(253))
10025 assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
10026 let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
10027 let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
10029 // Subtract 3 sats for the multiplier limit and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
10030 // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
10031 // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
10032 let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10033 let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
10035 // This test was written with a fixed dust value here, which we retain, but assert that it is,
10036 // indeed, dust on both transactions.
10037 let dust_htlc_on_counterparty_tx: u64 = 4;
10038 let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
10039 let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10040 assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
10041 assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);
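// The fixed 1_250_000 msat value sits below both the holder-tx and counterparty-tx dust thresholds
// (as just asserted), so each such HTLC counts in full toward dust exposure on both commitments.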
10044 if dust_outbound_balance {
10045 // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10046 // Outbound dust balance: 4372 sats
10047 // Note that we need the sent payment to be above the outbound dust threshold on the counterparty tx of 2132 sats
10048 for _ in 0..dust_outbound_htlc_on_holder_tx {
10049 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
10050 nodes[0].node.send_payment_with_route(&route, payment_hash,
10051 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10054 // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10055 // Inbound dust balance: 4372 sats
10056 // Note that we need the sent payment to be above the outbound dust threshold on the counterparty tx of 2031 sats
10057 for _ in 0..dust_inbound_htlc_on_holder_tx {
10058 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
10062 if dust_outbound_balance {
10063 // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10064 // Outbound dust balance: 5000 sats
10065 for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10066 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
10067 nodes[0].node.send_payment_with_route(&route, payment_hash,
10068 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10071 // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10072 // Inbound dust balance: 5000 sats
10073 for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10074 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
10079 if exposure_breach_event == ExposureEvent::AtHTLCForward {
10080 route.paths[0].hops.last_mut().unwrap().fee_msat =
10081 if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
10082 // With default dust exposure: 5000 sats
10084 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10085 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10086 ), true, APIError::ChannelUnavailable { .. }, {});
10088 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10089 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10090 ), true, APIError::ChannelUnavailable { .. }, {});
10092 } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
10093 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
10094 nodes[1].node.send_payment_with_route(&route, payment_hash,
10095 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10096 check_added_monitors!(nodes[1], 1);
10097 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
10098 assert_eq!(events.len(), 1);
10099 let payment_event = SendEvent::from_event(events.remove(0));
10100 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
10101 // With default dust exposure: 5000 sats
10103 // Outbound dust balance: 6399 sats
10104 let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
10105 let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
10106 nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
10108 // Outbound dust balance: 5200 sats
10109 nodes[0].logger.assert_log("lightning::ln::channel",
10110 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
10111 dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
10112 max_dust_htlc_exposure_msat), 1);
10114 } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
10115 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
10116 // For the multiplier dust exposure limit, since it scales with feerate,
10117 // we need to add a lot of HTLCs that will become dust at the new feerate
10118 // to cross the threshold.
10119 for _ in 0..AT_FEE_OUTBOUND_HTLCS {
10120 let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
10121 nodes[0].node.send_payment_with_route(&route, payment_hash,
10122 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10125 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10126 *feerate_lock = *feerate_lock * 10;
10128 nodes[0].node.timer_tick_occurred();
10129 check_added_monitors!(nodes[0], 1);
10130 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
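// The feerate bump is rejected before any `update_fee` goes out: at the new rate the dust
// thresholds rise, the small HTLCs added above would become trimmed-to-dust, and the resulting
// exposure would exceed the configured limit, so the holder refuses to send the fee update.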
10133 let _ = nodes[0].node.get_and_clear_pending_msg_events();
10134 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
10135 added_monitors.clear();
10138 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
10139 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10140 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10141 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10142 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10143 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10144 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10145 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10146 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10147 if !multiplier_dust_limit && !apply_excess_fee {
10148 // Because non-dust HTLC transaction fees are included in the dust exposure, trying to
10149 // increase the fee to hit a higher dust exposure with a
10150 // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these
10151 // in the `multiplier_dust_limit` case.
10152 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10153 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10154 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10155 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10160 fn test_max_dust_htlc_exposure() {
10161 do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
10162 do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
10163 do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
10164 do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
10168 fn test_nondust_htlc_fees_are_dust() {
10169 // Test that the transaction fees paid in nondust HTLCs count towards our dust limit
10170 let chanmon_cfgs = create_chanmon_cfgs(3);
10171 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10173 let mut config = test_default_channel_config();
10174 // Set the dust limit to the default value
10175 config.channel_config.max_dust_htlc_exposure =
10176 MaxDustHTLCExposure::FeeRateMultiplier(10_000);
10177 // Make sure the HTLC limits don't get in the way
10178 config.channel_handshake_limits.min_max_accepted_htlcs = 400;
10179 config.channel_handshake_config.our_max_accepted_htlcs = 400;
10180 config.channel_handshake_config.our_htlc_minimum_msat = 1;
10182 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
10183 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10185 // Create a channel from 1 -> 0 but immediately push all of the funds towards 0
10186 let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
10187 while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
10188 send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
10191 // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
10192 // repeatedly until we run out of space.
10193 const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
10194 let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
10196 while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
10197 route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
10199 assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
10200 "We don't want to run out of ability to send because of some non-dust limit");
10201 assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
10202 "We should be able to fill our dust limit without too many HTLCs");
10204 let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
10205 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10206 assert_eq!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
10207 "Make sure we are able to send once we clear one HTLC");
10209 // At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
10210 // exposure limit, and we want to max that out using non-dust HTLCs.
10211 let commitment_tx_per_htlc_cost =
10212 htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
10213 let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
10214 assert!(max_htlcs_remaining < 30,
10215 "We should be able to fill our dust limit without too many HTLCs");
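// Each payment below is sized at `dust_limit * 2`, so it is unambiguously non-dust, yet the fee of
// its HTLC transaction still counts against the dust exposure budget. We keep sending until the
// next-HTLC limit drops below `dust_limit`, i.e. until only dust-sized sends remain possible.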
10216 for i in 0..max_htlcs_remaining + 1 {
10217 assert_ne!(i, max_htlcs_remaining);
10218 if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
10219 // We found our limit, and it was less than max_htlcs_remaining!
10220 // At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
10221 // remaining dust exposure.
10224 route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
10227 // At this point non-dust HTLCs are no longer accepted from node 0 -> 1; we also check that
10228 // such HTLCs can't be routed over the same channel either.
10229 create_announced_chan_between_nodes(&nodes, 2, 0);
10230 let (route, payment_hash, _, payment_secret) =
10231 get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
10232 let onion = RecipientOnionFields::secret_only(payment_secret);
10233 nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
10234 check_added_monitors(&nodes[2], 1);
10235 let send = SendEvent::from_node(&nodes[2]);
10237 nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
10238 commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
10240 expect_pending_htlcs_forwardable!(nodes[0]);
10241 check_added_monitors(&nodes[0], 1);
10242 let node_id_1 = nodes[1].node.get_our_node_id();
10243 expect_htlc_handling_failed_destinations!(
10244 nodes[0].node.get_and_clear_pending_events(),
10245 &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
10248 let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
10249 nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
10250 commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
10251 expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
10256 fn test_non_final_funding_tx() {
10257 let chanmon_cfgs = create_chanmon_cfgs(2);
10258 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10259 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10260 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10262 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10263 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10264 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10265 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10266 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10268 let best_height = nodes[0].node.best_block.read().unwrap().height;
10270 let chan_id = *nodes[0].network_chan_count.borrow();
10271 let events = nodes[0].node.get_and_clear_pending_events();
10272 let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) };
10273 assert_eq!(events.len(), 1);
10274 let mut tx = match events[0] {
10275 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10276 // Timelock the transaction _beyond_ the best client height + 1.
10277 Transaction { version: chan_id as i32, lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
10278 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10281 _ => panic!("Unexpected event"),
10283 // Transaction should fail as it's evaluated as non-final for propagation.
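// Consensus-wise, a transaction with an absolute block-height locktime L is only valid in blocks
// strictly above height L. With a locktime of `best_height + 2` the transaction cannot be mined for
// at least two more blocks, which LDK treats as non-final; a single block of headroom is tolerated
// (see `test_non_final_funding_tx_within_headroom` below).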
10284 match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
10285 Err(APIError::APIMisuseError { err }) => {
10286 assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
10290 let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
10291 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
10292 assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
10296 fn test_non_final_funding_tx_within_headroom() {
10297 let chanmon_cfgs = create_chanmon_cfgs(2);
10298 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10299 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10300 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10302 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10303 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10304 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10305 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10306 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10308 let best_height = nodes[0].node.best_block.read().unwrap().height;
10310 let chan_id = *nodes[0].network_chan_count.borrow();
10311 let events = nodes[0].node.get_and_clear_pending_events();
10312 let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) };
10313 assert_eq!(events.len(), 1);
10314 let mut tx = match events[0] {
10315 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10316 // Timelock the transaction within a +1 headroom from the best block.
10317 Transaction { version: chan_id as i32, lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
10318 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10321 _ => panic!("Unexpected event"),
10324 // Transaction should be accepted if it's in a +1 headroom from best block.
10325 assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
10326 get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
10330 fn accept_busted_but_better_fee() {
10331 // If a peer sends us a fee update that is too low, but higher than our previous channel
10332 // feerate, we should accept it. In the future we may want to consider closing the channel
10333 // later, but for now we only accept the update.
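// Broadly, only the channel funder sends `update_fee`, so force-closing on every lowball update
// would be needlessly destructive. As long as each update moves the feerate toward our estimate we
// tolerate it; only a decrease below our lower bound (the last case below) closes the channel.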
10334 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10335 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10336 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10337 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10339 create_chan_between_nodes(&nodes[0], &nodes[1]);
10341 // Set nodes[1] to expect 5,000 sat/kW.
10343 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
10344 *feerate_lock = 5000;
10347 // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
10349 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10350 *feerate_lock = 1000;
10352 nodes[0].node.timer_tick_occurred();
10353 check_added_monitors!(nodes[0], 1);
10355 let events = nodes[0].node.get_and_clear_pending_msg_events();
10356 assert_eq!(events.len(), 1);
10358 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10359 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10360 commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10362 _ => panic!("Unexpected event"),
10365 // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept it.
10368 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10369 *feerate_lock = 2000;
10371 nodes[0].node.timer_tick_occurred();
10372 check_added_monitors!(nodes[0], 1);
10374 let events = nodes[0].node.get_and_clear_pending_msg_events();
10375 assert_eq!(events.len(), 1);
10377 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10378 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10379 commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10381 _ => panic!("Unexpected event"),
10384 // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the channel.
10387 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10388 *feerate_lock = 1000;
10390 nodes[0].node.timer_tick_occurred();
10391 check_added_monitors!(nodes[0], 1);
10393 let events = nodes[0].node.get_and_clear_pending_msg_events();
10394 assert_eq!(events.len(), 1);
10396 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
10397 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10398 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
10399 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
10400 [nodes[0].node.get_our_node_id()], 100000);
10401 check_closed_broadcast!(nodes[1], true);
10402 check_added_monitors!(nodes[1], 1);
10404 _ => panic!("Unexpected event"),
10408 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
10409 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10410 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10411 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10412 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10413 let min_final_cltv_expiry_delta = 120;
10414 let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
10415 min_final_cltv_expiry_delta - 2 };
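// The recipient advertises `min_final_cltv_expiry_delta` (normally via the invoice); a final hop
// whose CLTV delta falls short of it is failed back by the payee, while one that meets it can be
// claimed. The +/- 2 here puts the route just above or just below that bound.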
10416 let recv_value = 100_000;
10418 create_chan_between_nodes(&nodes[0], &nodes[1]);
10420 let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
10421 let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
10422 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
10423 Some(recv_value), Some(min_final_cltv_expiry_delta));
10424 (payment_hash, payment_preimage, payment_secret)
10426 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
10427 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
10429 let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
10430 nodes[0].node.send_payment_with_route(&route, payment_hash,
10431 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10432 check_added_monitors!(nodes[0], 1);
10433 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
10434 assert_eq!(events.len(), 1);
10435 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
10436 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
10437 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
10438 expect_pending_htlcs_forwardable!(nodes[1]);
10441 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
10442 None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
10444 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
10446 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
10448 check_added_monitors!(nodes[1], 1);
10450 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
10451 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
10452 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
10454 expect_payment_failed!(nodes[0], payment_hash, true);
10459 fn test_payment_with_custom_min_cltv_expiry_delta() {
10460 do_payment_with_custom_min_final_cltv_expiry(false, false);
10461 do_payment_with_custom_min_final_cltv_expiry(false, true);
10462 do_payment_with_custom_min_final_cltv_expiry(true, false);
10463 do_payment_with_custom_min_final_cltv_expiry(true, true);
10467 fn test_disconnects_peer_awaiting_response_ticks() {
10468 // Tests that nodes which are awaiting a response critical for channel responsiveness
10469 // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
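// The ticks are counted in `timer_tick_occurred` calls: a peer that still owes us a message
// critical to channel progress (e.g. a `RevokeAndACK`, `CommitmentSigned`, or `ChannelReestablish`)
// after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS` ticks is sent a warning and disconnected so that
// reconnecting can restart the exchange.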
10470 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10471 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10472 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10473 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10475 // Asserts a disconnect event is queued to the user.
10476 let check_disconnect_event = |node: &Node, should_disconnect: bool| {
10477 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
10478 if let MessageSendEvent::HandleError { action, .. } = event {
10479 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
10488 assert_eq!(disconnect_event.is_some(), should_disconnect);
10491 // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
10492 // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10493 let check_disconnect = |node: &Node| {
10494 // No disconnect without any timer ticks.
10495 check_disconnect_event(node, false);
10497 // No disconnect with 1 timer tick less than required.
10498 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
10499 node.node.timer_tick_occurred();
10500 check_disconnect_event(node, false);
10503 // Disconnect after reaching the required ticks.
10504 node.node.timer_tick_occurred();
10505 check_disconnect_event(node, true);
10507 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
10508 node.node.timer_tick_occurred();
10509 check_disconnect_event(node, true);
10512 create_chan_between_nodes(&nodes[0], &nodes[1]);
10514 // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
10515 *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
10516 nodes[0].node.timer_tick_occurred();
10517 check_added_monitors!(&nodes[0], 1);
10518 let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10519 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
10520 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
10521 check_added_monitors!(&nodes[1], 1);
10523 // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
10524 let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
10525 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
10526 check_added_monitors!(&nodes[0], 1);
10527 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
10528 check_added_monitors(&nodes[0], 1);
10530 // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
10531 // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
10532 // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10533 let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10534 check_disconnect(&nodes[1]);
10536 // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
10538 // Note that since the commitment dance didn't complete above, Alice is expected to resend her
10539 // final `RevokeAndACK` to Bob to complete it.
10540 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10541 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10542 let bob_init = msgs::Init {
10543 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10545 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
10546 let alice_init = msgs::Init {
10547 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10549 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
10551 // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
10552 // received Bob's yet, so she should disconnect him after reaching
10553 // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10554 let alice_channel_reestablish = get_event_msg!(
10555 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
10557 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
10558 check_disconnect(&nodes[0]);
10560 // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
10561 let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
10562 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
10563 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
10569 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
10571 // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
10572 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10573 nodes[0].node.timer_tick_occurred();
10574 check_disconnect_event(&nodes[0], false);
10577 // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
10578 // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10579 check_disconnect(&nodes[1]);
10581 // Finally, have Bob process the last message.
10582 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
10583 check_added_monitors(&nodes[1], 1);
10585 // At this point, neither node should attempt to disconnect each other, since they aren't
10586 // waiting on any messages.
10587 for node in &nodes {
10588 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10589 node.node.timer_tick_occurred();
10590 check_disconnect_event(node, false);
10596 fn test_remove_expired_outbound_unfunded_channels() {
10597 let chanmon_cfgs = create_chanmon_cfgs(2);
10598 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10599 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10600 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10602 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10603 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10604 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10605 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10606 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10608 let events = nodes[0].node.get_and_clear_pending_events();
10609 assert_eq!(events.len(), 1);
10611 Event::FundingGenerationReady { .. } => (),
10612 _ => panic!("Unexpected event"),
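// No funding transaction is ever provided for this channel, so it stays unfunded and is subject to
// the `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` sweep driven by `timer_tick_occurred` below.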
10616 // Asserts that the outbound channel has been removed from nodes[0]'s peer state map.
10616 let check_outbound_channel_existence = |should_exist: bool| {
10617 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10618 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10619 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10622 // Channel should exist without any timer ticks.
10623 check_outbound_channel_existence(true);
10625 // Channel should exist with 1 timer tick less than required.
10626 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10627 nodes[0].node.timer_tick_occurred();
10628 check_outbound_channel_existence(true)
10631 // Remove channel after reaching the required ticks.
10632 nodes[0].node.timer_tick_occurred();
10633 check_outbound_channel_existence(false);
10635 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10636 assert_eq!(msg_events.len(), 1);
10637 match msg_events[0] {
10638 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10639 assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10641 _ => panic!("Unexpected event"),
10643 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
10647 fn test_remove_expired_inbound_unfunded_channels() {
10648 let chanmon_cfgs = create_chanmon_cfgs(2);
10649 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10650 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10651 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10653 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10654 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10655 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10656 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10657 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10659 let events = nodes[0].node.get_and_clear_pending_events();
10660 assert_eq!(events.len(), 1);
10662 Event::FundingGenerationReady { .. } => (),
10663 _ => panic!("Unexpected event"),
10667 // Asserts that the inbound channel has been removed from nodes[1]'s peer state map.
10667 let check_inbound_channel_existence = |should_exist: bool| {
10668 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10669 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10670 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10673 // Channel should exist without any timer ticks.
10674 check_inbound_channel_existence(true);
10676 // Channel should exist with 1 timer tick less than required.
10677 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10678 nodes[1].node.timer_tick_occurred();
10679 check_inbound_channel_existence(true)
10682 // Remove channel after reaching the required ticks.
10683 nodes[1].node.timer_tick_occurred();
10684 check_inbound_channel_existence(false);
10686 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
10687 assert_eq!(msg_events.len(), 1);
10688 match msg_events[0] {
10689 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10690 assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10692 _ => panic!("Unexpected event"),
10694 check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
10698 fn test_channel_close_when_not_timely_accepted() {
10699 // Create network of two nodes
10700 let chanmon_cfgs = create_chanmon_cfgs(2);
10701 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10702 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10703 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10705 // Simulate a peer disconnect mid-handshake.
10706 // The channel is initiated from the node 0 side,
10707 // but the nodes disconnect before node 1 can send its accept_channel.
10708 let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10709 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10710 assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10712 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10713 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10716 // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
10716 assert_eq!(nodes[0].node.list_channels().len(), 1);
10718 // Since the channel was inbound from nodes[1]'s perspective, it should have been dropped immediately.
10719 assert_eq!(nodes[1].node.list_channels().len(), 0);
10721 // In the meantime, some time passes.
10722 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
10723 nodes[0].node.timer_tick_occurred();
10726 // Since we disconnected from the peer and did not reconnect within the allowed time,
10727 // we should have force-closed the channel by now.
10728 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
10729 assert_eq!(nodes[0].node.list_channels().len(), 0);
10732 // Since the accept_channel message was never received,
10733 // the channel should have been force-closed by now from node 0's side
10734 // and the peer removed from per_peer_state.
10735 let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10736 assert_eq!(node_0_per_peer_state.len(), 0);
10741 fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
10742 // Create network of two nodes
10743 let chanmon_cfgs = create_chanmon_cfgs(2);
10744 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10745 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10746 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10748 // Simulate a peer disconnect mid-handshake.
10749 // The channel is initiated from the node 0 side,
10750 // but the nodes disconnect before node 1 can send its accept_channel.
10751 let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10752 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10753 assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10755 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10756 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10758 // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
10759 assert_eq!(nodes[0].node.list_channels().len(), 1);
10761 // Since the channel was inbound from nodes[1]'s perspective, it should have been dropped immediately.
10762 assert_eq!(nodes[1].node.list_channels().len(), 0);
10764 // The peers now reconnect
10765 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
10766 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10768 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
10769 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10770 }, false).unwrap();
10772 // Make sure the SendOpenChannel message is added to node_0 pending message events
10773 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10774 assert_eq!(msg_events.len(), 1);
10775 match &msg_events[0] {
10776 MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
10777 _ => panic!("Unexpected message."),
10781 fn do_test_multi_post_event_actions(do_reload: bool) {
10782 // Tests handling multiple post-Event actions at once.
10783 // There is specific code in ChannelManager to handle channels where multiple post-Event
10784 // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
10786 // Specifically, we test calling `get_and_clear_pending_events` while there are two
10787 // PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
10788 // - one from an RAA and one from an inbound commitment_signed.
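// The `PaymentSent` events hold back their corresponding `ChannelMonitorUpdate`s until the user
// has actually processed them; this test checks that several such blocked updates, spread across
// two channels, are all released together once `get_and_clear_pending_events` returns.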
10789 let chanmon_cfgs = create_chanmon_cfgs(3);
10790 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10791 let (persister, chain_monitor);
10792 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10793 let nodes_0_deserialized;
10794 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;

	send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	send_payment(&nodes[0], &[&nodes[2]], 1_000_000);

	let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);

	nodes[1].node.claim_funds(our_payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);

	nodes[2].node.claim_funds(payment_preimage_2);
	check_added_monitors!(nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);

	for dest in &[1, 2] {
		let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
		commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
		check_added_monitors(&nodes[0], 0);
	}

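	// Have nodes[1] send a payment in the opposite direction. nodes[0] handles the resulting
	// update_add_htlc and commitment_signed below, but its RAA + CS response is held back until
	// the pending events have been processed.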
	let (route, payment_hash_3, _, payment_secret_3) =
		get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
	let payment_id = PaymentId(payment_hash_3.0);
	nodes[1].node.send_payment_with_route(&route, payment_hash_3,
		RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
	check_added_monitors(&nodes[1], 1);

	let send_event = SendEvent::from_node(&nodes[1]);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

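	// Optionally reload nodes[0] from its serialized state to ensure the pending post-event
	// actions survive a restart.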
	if do_reload {
		let nodes_0_serialized = nodes[0].node.encode();
		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
		let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
		reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);

		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());

		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
	}

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 4);
	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
	} else { panic!(); }
	if let Event::PaymentSent { payment_preimage, .. } = events[1] {
		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
	} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }

	// After the events are processed, the ChannelMonitorUpdates will be released and, upon their
	// completion, we'll respond to nodes[1] with an RAA + CS.
	get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
	check_added_monitors(&nodes[0], 3);
}

#[test]
fn test_multi_post_event_actions() {
	do_test_multi_post_event_actions(true);
	do_test_multi_post_event_actions(false);
}

#[test]
fn test_batch_channel_open() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);

	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);

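	// For the second channel, hold the initial monitor persistence in-progress so that
	// broadcasting the batch funding transaction is gated on its completion.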
	// Go through the funding_created and funding_signed flow with node 2.
	nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
	check_added_monitors(&nodes[2], 1);
	expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before the persistence of all monitors has
	// been completed.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);

	// Complete the persistence of the monitor.
	nodes[0].chain_monitor.complete_sole_pending_chan_update(
		&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
	);
	let events = nodes[0].node.get_and_clear_pending_events();

	// The transaction should only have been broadcast now.
	let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
	assert_eq!(broadcasted_txs.len(), 1);
	assert_eq!(broadcasted_txs[0], tx);

	assert_eq!(events.len(), 2);
	assert!(events.iter().any(|e| matches!(
		*e,
		crate::events::Event::ChannelPending {
			ref counterparty_node_id,
			..
		} if counterparty_node_id == &nodes[1].node.get_our_node_id(),
	)));
	assert!(events.iter().any(|e| matches!(
		*e,
		crate::events::Event::ChannelPending {
			ref counterparty_node_id,
			..
		} if counterparty_node_id == &nodes[2].node.get_our_node_id(),
	)));
}

#[test]
fn test_close_in_funding_batch() {
	// This test ensures that if one of the channels in the batch closes, the complete batch
	// will close.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);

	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);

	// Force-close the channel for which we've completed the initial monitor.
	let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);

	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();

	// The monitor should become closed.
	check_added_monitors(&nodes[0], 1);
	{
		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
		assert_eq!(monitor_updates_1.len(), 1);
		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
	}

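	// nodes[0] should send an error message to the peer of the force-closed channel.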
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	match msg_events[0] {
		MessageSendEvent::HandleError { .. } => (),
		_ => panic!("Unexpected message."),
	}

	// We broadcast the commitment transaction as part of the force-close.
	{
		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(broadcasted_txs.len(), 1);
		assert!(broadcasted_txs[0].txid() != tx.txid());
		assert_eq!(broadcasted_txs[0].input.len(), 1);
		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
	}

	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_1),
			user_channel_id: Some(42),
			..Default::default()
		},
		ExpectedCloseEvent {
			channel_id: Some(channel_id_2),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_2),
			user_channel_id: Some(43),
			..Default::default()
		},
	]);

	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn test_batch_funding_close_after_funding_signed() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);

	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// Go through the funding_created and funding_signed flow with node 2.
	nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
	check_added_monitors(&nodes[2], 1);
	expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);

	// Force-close the channel for which we've completed the initial monitor.
	let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
	check_added_monitors(&nodes[0], 2);
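	// Both monitors should be marked closed: force-closing one channel in the batch closes the
	// entire batch.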
	{
		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
		assert_eq!(monitor_updates_1.len(), 1);
		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
		let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
		assert_eq!(monitor_updates_2.len(), 1);
		assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
	}

	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	match msg_events[0] {
		MessageSendEvent::HandleError { .. } => (),
		_ => panic!("Unexpected message."),
	}

	// We broadcast the commitment transaction as part of the force-close.
	{
		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(broadcasted_txs.len(), 1);
		assert!(broadcasted_txs[0].txid() != tx.txid());
		assert_eq!(broadcasted_txs[0].input.len(), 1);
		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
	}

	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_1),
			user_channel_id: Some(42),
			..Default::default()
		},
		ExpectedCloseEvent {
			channel_id: Some(channel_id_2),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_2),
			user_channel_id: Some(43),
			..Default::default()
		},
	]);

	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}

fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
	// Tests that a node will forget the channel (when it only requires 1 confirmation) if the
	// funding and commitment transactions confirm in the same block.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let mut min_depth_1_block_cfg = test_default_channel_config();
	min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
	let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });

	assert_eq!(nodes[0].node.list_channels().len(), 1);
	assert_eq!(nodes[1].node.list_channels().len(), 1);

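	// Depending on `confirm_remote_commitment`, either side force-closes, so we cover confirming
	// both the local and the remote commitment transaction.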
	let (closing_node, other_node) = if confirm_remote_commitment {
		(&nodes[1], &nodes[0])
	} else {
		(&nodes[0], &nodes[1])
	};

	closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
	let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events.pop().unwrap() {
		MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
		_ => panic!("Unexpected event"),
	}
	check_added_monitors(closing_node, 1);
	check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);

	let commitment_tx = {
		let mut txn = closing_node.tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 1);
		let commitment_tx = txn.pop().unwrap();
		check_spends!(commitment_tx, funding_tx);
		commitment_tx
	};

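	// Confirm the funding and commitment transactions in the same block on both nodes.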
	mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
	mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);

	check_closed_broadcast(other_node, 1, true);
	check_added_monitors(other_node, 1);
	check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);

	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[1].node.list_channels().is_empty());
}

#[test]
fn test_funding_and_commitment_tx_confirm_same_block() {
	do_test_funding_and_commitment_tx_confirm_same_block(false);
	do_test_funding_and_commitment_tx_confirm_same_block(true);
}

#[test]
fn test_accept_inbound_channel_errors_queued() {
	// For manually accepted inbound channels, tests that a close error is correctly handled
	// and the channel fails for the initiator.
	let mut config0 = test_default_channel_config();
	let mut config1 = config0.clone();
	config1.channel_handshake_limits.their_to_self_delay = 1000;
	config1.manually_accept_inbound_channels = true;
	config0.channel_handshake_config.our_to_self_delay = 2000;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
	let events = nodes[1].node.get_and_clear_pending_events();
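	// nodes[0] requires a to_self_delay (2000) above nodes[1]'s configured limit (1000), so
	// manually accepting the inbound channel should fail, queueing an error for the initiator.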
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
				Err(APIError::ChannelUnavailable { err: _ }) => (),
				_ => panic!(),
			}
		}
		_ => panic!("Unexpected event"),
	}
	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);
}