// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that stand up a network of ChannelManagers, create channels, send
//! payments/messages between them, and often check that the resulting ChannelMonitors are able to
//! claim outputs on-chain.
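//!
//! These tests drive real ChannelManager and ChannelMonitor state machines entirely in-process
//! via the helpers in functional_test_utils (create_network, send_payment,
//! commitment_signed_dance!, and friends); no actual networking or disk I/O is involved.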
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils::{self, WatchtowerPersister};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::{Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};

use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;

#[test]
fn test_insane_channel_opens() {
	// Stand up a network of 2 nodes
	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
	let mut cfg = UserConfig::default();
	cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Instantiate channel parameters where we push the maximum msats given our
	// funding satoshis
	let channel_value_sat = 31337; // same as funding satoshis
	let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
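	// With the counterparty's required reserve carved out, push_msat is the largest amount we can
	// push at open: (channel_value_sat - reserve) * 1000 msat. Each mutation below nudges one
	// field just past a limit and expects node1 to reject the open with a matching error.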
	// Have node0 initiate a channel to node1 with aforementioned parameters
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();

	// Extract the channel open message from node0 to node1
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Test helper that asserts we get the correct error string given a mutator
	// that supposedly makes the channel open message insane
	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
			match action {
				&ErrorAction::SendErrorMessage { .. } => {
					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
				},
				_ => panic!("unexpected event!"),
			}
		} else { assert!(false); }
	};

	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

	// Test all mutations that would make the channel open message insane
	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });

	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
	// them.
	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	*node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
		},
		_ => panic!()
	}
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
	// but only for them. Because some LSPs do it with some level of trust of the clients (for a
	// substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
	// in normal testing, we test it explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();

	// Have node0 initiate a channel to node1 with aforementioned parameters
	let mut push_amt = 100_000_000;
	let feerate_per_kw = 253;
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
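	// Rough sketch of the deductions above, assuming LDK's non-anchor weights (724 weight units
	// per commitment tx plus 172 per HTLC): at 253 sat/kW, reserving fee for 4 HTLC slots costs
	// 253 * (724 + 4 * 172) / 1000 = 357 sats, so push_amt ends up as 100_000_000 msat minus
	// 357_000 msat minus the holder reserve (converted to msat).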

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
	let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	if !send_from_initiator {
		open_channel_message.channel_reserve_satoshis = 0;
		open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

	// Extract the channel accept message from node1 to node0
	let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	if send_from_initiator {
		accept_channel_message.channel_reserve_satoshis = 0;
		accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
	{
		let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
		let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
		let mut sender_node_per_peer_lock;
		let mut sender_node_peer_state_lock;

		let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
		match channel_phase {
			ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
				let chan_context = channel_phase.context_mut();
				chan_context.holder_selected_channel_reserve_satoshis = 0;
				chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
			},
			_ => assert!(false),
		}
	}

	let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
	let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
	create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

	// nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
	// security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
	if send_from_initiator {
		send_payment(&nodes[0], &[&nodes[1]], 100_000_000
			// Note that for outbound channels we have to consider the commitment tx fee and the
			// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
			// well as an additional HTLC.
			- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
	} else {
		send_payment(&nodes[1], &[&nodes[0]], push_amt);
	}
}

#[test]
fn test_counterparty_no_reserve() {
	do_test_counterparty_no_reserve(true);
	do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// A                                        B
	// update_fee                            ->
	// send (1) commitment_signed            -.
	//                                       <- update_add_htlc/commitment_signed
	// send (2) RAA (awaiting remote revoke) -.
	// (1) commitment_signed is delivered    ->
	//                                       .- send (3) RAA (awaiting remote revoke)
	// (2) RAA is delivered                  ->
	//                                       .- send (4) commitment_signed
	//                                       <- (3) RAA is delivered
	// send (5) commitment_signed            -.
	//                                       <- (4) commitment_signed is delivered
	// send (6) RAA                          -.
	// (5) commitment_signed is delivered    ->
	//                                       <- RAA
	// (6) RAA is delivered                  ->
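	// In other words, nodes[0]'s update_fee crosses nodes[1]'s update_add_htlc on the wire, so
	// each side receives a commitment_signed while it is itself awaiting a revoke_and_ack and
	// must queue its response rather than generating a fresh commitment immediately.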

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	// deliver (1), generating (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_update.update_add_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fee.is_none()); // (4)
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
	let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update.update_add_htlcs.is_empty()); // (5)
	assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
	assert!(as_update.update_fee.is_none()); // (5)
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// only (6) so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
	let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	check_added_monitors!(nodes[0], 1);

	let events_2 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
	// crash in an earlier version of the update_fee patch)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let update_msg = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
			update_fee.as_ref()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
	check_added_monitors!(nodes[1], 1);

	// We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// A                                        B
	// update_fee/commitment_signed          ->
	//                                       .- send (1) RAA and (2) commitment_signed
	// update_fee (never committed)          ->
	// (3) update_fee                        ->
	// We have to manually generate the above update_fee, it is allowed by the protocol but we
	// don't track which updates correspond to which revoke_and_ack responses so we're in
	// AwaitingRAA mode and will not generate the update_fee yet.
	//                                       <- (1) RAA delivered
	// (3) is generated and send (4) CS      -.
	// Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
	// know the per_commitment_point to use for it.
	//                                       <- (2) commitment_signed delivered
	// revoke_and_ack                        ->
	//                                          B should send no response here
	// (4) commitment_signed delivered       ->
	//                                       <- RAA/commitment_signed delivered
	// revoke_and_ack                        ->
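	// The point of the dance above: when several fee updates are in flight at once, only the
	// most recent one should ever make it into a commitment; the earlier update_fee messages
	// are simply superseded.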

	// First nodes[0] generates an update_fee
	let initial_feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		initial_feerate = *feerate_lock;
		*feerate_lock = initial_feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref().unwrap(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	// Deliver first update_fee/commitment_signed pair, generating (1) and (2):
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
	let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
	// transaction:
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = initial_feerate + 40;
	}
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Create the (3) update_fee message that nodes[0] will generate before it does...
	let mut update_msg_2 = msgs::UpdateFee {
		channel_id: update_msg_1.channel_id.clone(),
		feerate_per_kw: (initial_feerate + 30) as u32,
	};
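	// ChannelManager refuses to generate further update_fee messages while it is awaiting a
	// revoke_and_ack, so we hand-craft them here to simulate multiple fee updates in flight.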
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
	// Deliver (3)
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	// Deliver (1), generating (3) and (4)
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
	let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	assert!(as_second_update.update_add_htlcs.is_empty());
	assert!(as_second_update.update_fulfill_htlcs.is_empty());
	assert!(as_second_update.update_fail_htlcs.is_empty());
	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
	// Check that the update_fee newly generated matches what we delivered:
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

	// Deliver (2) commitment_signed
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Deliver (4)
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
	// Previously, we had issues deserializing channels when we hadn't connected the first block
	// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
	// serialization round-trips and simply do steps towards opening a channel and then drop the
	// Node objects.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	if steps & 0b1000_0000 != 0 {
		let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	if steps & 0x0f == 0 { return; }
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 1 { return; }
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 2 { return; }
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	if steps & 0x0f == 3 { return; }
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 4 { return; }
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 5 { return; }
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	if steps & 0x0f == 6 { return; }
	create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

	if steps & 0x0f == 7 { return; }
	confirm_transaction_at(&nodes[0], &tx, 2);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
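	// The low nibble of each value selects how many handshake steps to run before dropping the
	// Node objects (0 through 8), while the high bit additionally connects a block first.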
	do_test_sanity_on_in_flight_opens(0);
	do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(1);
	do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(2);
	do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(3);
	do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(4);
	do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(5);
	do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(6);
	do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(7);
	do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(8);
	do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
}

#[test]
fn test_update_fee_vanilla() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 25;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_value = 5000;
	let push_sats = 700;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
	let channel_id = chan.2;
	let secp_ctx = Secp256k1::new();
	let default_config = UserConfig::default();
	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Calculate the maximum feerate that A can afford. Note that when sending an update_fee we
	// keep fee headroom for CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out
	// of local balance, so we calculate two different feerates here - the expected local limit as
	// well as the expected remote limit.
	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
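	// Sketch of the arithmetic above: the funder pays the commitment fee from its own balance,
	// so the largest affordable feerate satisfies feerate_per_kw * weight / 1000 <=
	// channel_value - reserve - push_sats, i.e. feerate_per_kw = available_sats * 1000 / weight.
	// The "local" limit additionally reserves weight for CONCURRENT_INBOUND_HTLC_FEE_BUFFER
	// potential HTLCs; the "remote" limit uses the bare commitment weight.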

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
	{
		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
		assert_eq!(commitment_tx.output.len(), 2);
		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
		actual_fee = channel_value - actual_fee;
		assert_eq!(total_fee, actual_fee);
	}

	{
		// Increment the feerate by a small constant, accounting for rounding errors
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 4;
	}
	nodes[0].node.timer_tick_occurred();
	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
	check_added_monitors!(nodes[0], 0);

	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
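	// == (1 << 48) - 2; BOLT 3 commitment numbers are 48-bit values counting down from
	// 2^48 - 1 as new commitments are exchanged.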

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 pubkeys.funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 pubkeys.funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
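	// Per BOLT 3, the keys that appear in a commitment transaction are derived by combining each
	// side's basepoints with the broadcaster's per_commitment_point; derive_new mirrors what the
	// channel does internally when it builds a counterparty commitment to sign.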

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			INITIAL_COMMITMENT_NUMBER - 1,
			push_sats,
			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			non_buffer_feerate + 4,
			&mut htlcs,
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
	};

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	let update_fee = msgs::UpdateFee {
		channel_id: chan.2,
		feerate_per_kw: non_buffer_feerate + 4,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

	// While producing the commitment_signed response after handling a received update_fee
	// request, the check that the funder (who sent the update_fee) can afford the new fee
	// (funder_balance >= fee + channel_reserve) should produce an error.
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
		[nodes[0].node.get_our_node_id()], channel_value);
}

#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);

	// nothing happens since node[1] is in AwaitingRemoteRevoke
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 0);
		added_monitors.clear();
	}
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// node[1] has nothing to do

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	// AwaitingRemoteRevoke ends here

	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(commitment_update.update_add_htlcs.len(), 1);
	assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
	assert!(commitment_update.update_fee.is_none());

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[0]);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { .. } => { },
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);

	send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let channel_id = chan.2;

	// A                                        B
	// (1) update_fee/commitment_signed      ->
	//                                       <- (2) revoke_and_ack
	//                                       .- send (3) commitment_signed
	// (4) update_fee/commitment_signed      ->
	//                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
	//                                       <- (3) commitment_signed delivered
	// send (6) revoke_and_ack               -.
	//                                       <- (5) deliver revoke_and_ack
	// (6) deliver revoke_and_ack            ->
	//                                       .- send (7) commitment_signed in response to (4)
	//                                       <- (7) deliver commitment_signed
	// revoke_and_ack                        ->

	// Create and deliver (1)...
	let feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		feerate = *feerate_lock;
		*feerate_lock = feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// Generate (2) and (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// Deliver (2):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Create and deliver (4)...
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate + 30;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	check_added_monitors!(nodes[1], 1);

	let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Handle (3), creating (6):
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Deliver (5):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Deliver (6), creating (7):
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(commitment_update.update_add_htlcs.is_empty());
	assert!(commitment_update.update_fulfill_htlcs.is_empty());
	assert!(commitment_update.update_fail_htlcs.is_empty());
	assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
	assert!(commitment_update.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	// Deliver (7)
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
	assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
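	// Both sides have converged on the final feerate, so the channel can be closed out cleanly.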
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn fake_network_test() {
	// Simple test which builds a network of ChannelManagers, connects them to each other, and
	// tests that payments get routed and transactions broadcast in semi-reasonable ways.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);

	// Rebalance the network a bit by relaying one payment through all the channels...
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);

	// Send some more payments
	send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);

	// Test failure packets
	let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);

	// Add a new channel that skips 3
	let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);

	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
	send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);

	// Do some rebalance loop payments, simultaneously
	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
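	// Forwarding fees compose back-to-front: each hop charges
	// fee_base_msat + fee_proportional_millionths * amount_forwarded / 1_000_000 msat on the
	// amount it forwards onward, so we fill in the final hop first and work backwards.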
	let payment_preimage_1 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;

	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
	let payment_hash_2 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;

	// Claim the rebalances...
	fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
	claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);

	// Close down the channels...
	close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn holding_cell_htlc_counting() {
	// Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
	// to ensure we don't end up with HTLCs sitting around in our holding cell for several
	// commitment dance rounds.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Fetch a route in advance as we will be unable to once we're unable to send.
	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);

	let mut payments = Vec::new();
	for _ in 0..50 {
		let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
		nodes[1].node.send_payment_with_route(&route, payment_hash,
			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
		payments.push((payment_preimage, payment_hash));
	}
	check_added_monitors!(nodes[1], 1);
1141 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1142 assert_eq!(events.len(), 1);
1143 let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1144 assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1146 // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1147 // the holding cell waiting on B's RAA to send. At this point we should not be able to add
1148 // another HTLC.
1150 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
1151 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
1152 ), true, APIError::ChannelUnavailable { .. }, {});
1153 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1156 // This should also be true if we try to forward a payment.
1157 let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
1159 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1160 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1161 check_added_monitors!(nodes[0], 1);
1164 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1165 assert_eq!(events.len(), 1);
1166 let payment_event = SendEvent::from_event(events.pop().unwrap());
1167 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1169 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1170 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1171 // We have to process the pending HTLC forwards twice - the first attempt forwards the payment
1172 // (and fails), the second processes the resulting failure and fails the HTLC backward.
1173 expect_pending_htlcs_forwardable!(nodes[1]);
1174 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1175 check_added_monitors!(nodes[1], 1);
1177 let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1178 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1179 commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1181 expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
1183 // Now forward all the pending HTLCs and claim them back
1184 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1185 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1186 check_added_monitors!(nodes[2], 1);
1188 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1189 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1190 check_added_monitors!(nodes[1], 1);
1191 let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1193 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1194 check_added_monitors!(nodes[1], 1);
1195 let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1197 for ref update in as_updates.update_add_htlcs.iter() {
1198 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1200 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1201 check_added_monitors!(nodes[2], 1);
1202 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1203 check_added_monitors!(nodes[2], 1);
1204 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1206 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1207 check_added_monitors!(nodes[1], 1);
1208 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1209 check_added_monitors!(nodes[1], 1);
1210 let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1212 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1213 check_added_monitors!(nodes[2], 1);
1215 expect_pending_htlcs_forwardable!(nodes[2]);
1217 let events = nodes[2].node.get_and_clear_pending_events();
1218 assert_eq!(events.len(), payments.len());
1219 for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1221 &Event::PaymentClaimable { ref payment_hash, .. } => {
1222 assert_eq!(*payment_hash, *hash);
1224 _ => panic!("Unexpected event"),
1228 for (preimage, _) in payments.drain(..) {
1229 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1232 send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1236 fn duplicate_htlc_test() {
1237 // Test that we accept duplicate payment_hash HTLCs across the network and that
1238 // claiming/failing one of them is independent of, and doesn't affect, the others
1239 let chanmon_cfgs = create_chanmon_cfgs(6);
1240 let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1241 let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1242 let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1244 // Create some initial channels so we can route from nodes 0/1/2, via node 3, to nodes 4/5
1245 create_announced_chan_between_nodes(&nodes, 0, 3);
1246 create_announced_chan_between_nodes(&nodes, 1, 3);
1247 create_announced_chan_between_nodes(&nodes, 2, 3);
1248 create_announced_chan_between_nodes(&nodes, 3, 4);
1249 create_announced_chan_between_nodes(&nodes, 3, 5);
1251 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
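// route_payment derives each preimage from the nodes' shared network_payment_count,
// so winding the counter back makes the next route_payment reuse the same preimage
// (and therefore the same payment hash), giving us duplicate-hash HTLCs on
// different paths.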
1253 *nodes[0].network_payment_count.borrow_mut() -= 1;
1254 assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1256 *nodes[0].network_payment_count.borrow_mut() -= 1;
1257 assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1259 claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1260 fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1261 claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1265 fn test_duplicate_htlc_different_direction_onchain() {
1266 // Test that ChannelMonitor doesn't generate two preimage txn
1267 // when we have two HTLCs with the same preimage that cross a node
1268 // in opposite directions, even with the same payment secret.
1269 let chanmon_cfgs = create_chanmon_cfgs(2);
1270 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1271 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1272 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1274 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1277 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1279 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1281 let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
1282 let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
1283 send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1285 // Provide preimage to node 0 by claiming payment
1286 nodes[0].node.claim_funds(payment_preimage);
1287 expect_payment_claimed!(nodes[0], payment_hash, 800_000);
1288 check_added_monitors!(nodes[0], 1);
1290 // Broadcast node 1's commitment txn
1291 let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1293 assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1294 let mut has_both_htlcs = 0; // check that both HTLC amounts appear among the committed outputs
1295 for outp in remote_txn[0].output.iter() {
1296 if outp.value == 800_000 / 1000 {
1297 has_both_htlcs += 1;
1298 } else if outp.value == 900_000 / 1000 {
1299 has_both_htlcs += 1;
1302 assert_eq!(has_both_htlcs, 2);
1304 mine_transaction(&nodes[0], &remote_txn[0]);
1305 check_added_monitors!(nodes[0], 1);
1306 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
1307 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
1309 let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1310 assert_eq!(claim_txn.len(), 3);
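// We expect three claims against the remote commitment: the preimage claim, a
// fee-bumped replacement of it, and the timeout claim; which is which is sorted out
// below by matching the outpoints they spend.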
1312 check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
1313 check_spends!(claim_txn[1], remote_txn[0]);
1314 check_spends!(claim_txn[2], remote_txn[0]);
1315 let preimage_tx = &claim_txn[0];
1316 let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
1317 (&claim_txn[1], &claim_txn[2])
1319 (&claim_txn[2], &claim_txn[1])
1322 assert_eq!(preimage_tx.input.len(), 1);
1323 assert_eq!(preimage_bump_tx.input.len(), 1);
1326 assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1327 assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);
1329 assert_eq!(timeout_tx.input.len(), 1);
1330 assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1331 check_spends!(timeout_tx, remote_txn[0]);
1332 assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
1334 let events = nodes[0].node.get_and_clear_pending_msg_events();
1335 assert_eq!(events.len(), 3);
1338 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1339 MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
1340 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1341 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
1343 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1344 assert!(update_add_htlcs.is_empty());
1345 assert!(update_fail_htlcs.is_empty());
1346 assert_eq!(update_fulfill_htlcs.len(), 1);
1347 assert!(update_fail_malformed_htlcs.is_empty());
1348 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1350 _ => panic!("Unexpected event"),
1356 fn test_basic_channel_reserve() {
1357 let chanmon_cfgs = create_chanmon_cfgs(2);
1358 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1359 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1360 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1361 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1363 let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1364 let channel_reserve = chan_stat.channel_reserve_msat;
1366 // The 2* and +1 are for the fee spike reserve.
1367 let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
1368 let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
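// nodes[0]'s spendable balance starts at 5_000_000 msat (a 100_000 sat channel with
// 95_000_000 msat pushed to nodes[1]); max_can_send then nets out the reserve and the
// buffered commitment fee, so the +1 below is the smallest possible over-send.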
1369 let (mut route, our_payment_hash, _, our_payment_secret) =
1370 get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
1371 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1372 let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1373 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
1375 PaymentSendFailure::AllFailedResendSafe(ref fails) => {
1376 if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
1377 else { panic!("Unexpected error variant"); }
1379 _ => panic!("Unexpected error variant"),
1381 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1383 send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1387 fn test_fee_spike_violation_fails_htlc() {
1388 let chanmon_cfgs = create_chanmon_cfgs(2);
1389 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1390 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1391 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1392 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1394 let (mut route, payment_hash, _, payment_secret) =
1395 get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
1396 route.paths[0].hops[0].fee_msat += 1;
1397 // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1398 let secp_ctx = Secp256k1::new();
1399 let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1401 let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1403 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1404 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
1405 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1406 3460001, &recipient_onion_fields, cur_height, &None).unwrap();
1407 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
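// We now hold a complete onion for the inflated 3_460_001 msat amount; splicing it
// into a hand-built update_add_htlc below sidesteps the sender-side balance and
// reserve checks that send_payment would have applied.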
1408 let msg = msgs::UpdateAddHTLC {
1409 channel_id: chan.2,
1410 htlc_id: 0,
1411 amount_msat: htlc_msat,
1412 payment_hash: payment_hash,
1413 cltv_expiry: htlc_cltv,
1414 onion_routing_packet: onion_packet,
1415 skimmed_fee_msat: None,
1416 blinding_point: None,
1417 };
1419 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1421 // Now manually create the commitment_signed message corresponding to the update_add
1422 // nodes[0] just sent. In the code for construction of this message, "local" refers
1423 // to the sender of the message, and "remote" refers to the receiver.
1425 let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);
1427 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
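// Per BOLT #3, commitment numbers start at 2^48 - 1 and count down as new commitment
// transactions are exchanged.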
1429 // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
1430 // needed to sign the new commitment tx and (2) sign the new commitment tx.
1431 let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
1432 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1433 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1434 let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
1435 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1436 ).flatten().unwrap();
1437 let chan_signer = local_chan.get_signer();
1438 // Make the signer believe we validated another commitment, so we can release the secret
1439 chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
1441 let pubkeys = chan_signer.as_ref().pubkeys();
1442 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1443 chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1444 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
1445 chan_signer.as_ref().pubkeys().funding_pubkey)
1447 let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
1448 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
1449 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
1450 let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
1451 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1452 ).flatten().unwrap();
1453 let chan_signer = remote_chan.get_signer();
1454 let pubkeys = chan_signer.as_ref().pubkeys();
1455 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1456 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
1457 chan_signer.as_ref().pubkeys().funding_pubkey)
1460 // Assemble the set of keys we can use for signatures for our commitment_signed message.
1461 let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1462 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
1464 // Build the remote commitment transaction so we can sign it, and then later use the
1465 // signature for the commitment_signed message.
1466 let local_chan_balance = 1313;
1468 let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1469 offered: false,
1470 amount_msat: 3460001,
1471 cltv_expiry: htlc_cltv,
1472 payment_hash: payment_hash,
1473 transaction_output_index: Some(1),
1474 };
1476 let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1478 let res = {
1479 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1480 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1481 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
1482 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1483 ).flatten().unwrap();
1484 let local_chan_signer = local_chan.get_signer();
1485 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1489 local_funding, remote_funding,
1490 commit_tx_keys.clone(),
1491 feerate_per_kw,
1492 &mut vec![(accepted_htlc_info, ())],
1493 &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
1494 );
1495 local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
1496 };
1498 let commit_signed_msg = msgs::CommitmentSigned {
1499 channel_id: chan.2,
1500 signature: res.0,
1501 htlc_signatures: res.1,
1502 #[cfg(taproot)]
1503 partial_signature_with_nonce: None,
1504 };
1506 // Send the commitment_signed message to nodes[1].
1507 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1508 let _ = nodes[1].node.get_and_clear_pending_msg_events();
1510 // Send the RAA to nodes[1].
1511 let raa_msg = msgs::RevokeAndACK {
1512 channel_id: chan.2,
1513 per_commitment_secret: local_secret,
1514 next_per_commitment_point: next_local_point,
1515 #[cfg(taproot)]
1516 next_local_nonce: None,
1517 };
1518 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1520 let events = nodes[1].node.get_and_clear_pending_msg_events();
1521 assert_eq!(events.len(), 1);
1522 // Make sure the HTLC failed in the way we expect.
1524 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1525 assert_eq!(update_fail_htlcs.len(), 1);
1526 update_fail_htlcs[0].clone()
1528 _ => panic!("Unexpected event"),
1530 nodes[1].logger.assert_log("lightning::ln::channel",
1531 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
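// Two monitor updates on nodes[1]: one for handling the commitment_signed and one
// for handling the revoke_and_ack.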
1533 check_added_monitors!(nodes[1], 2);
1537 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1538 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1539 // Set the fee rate for the channel very high, to the point where the fundee
1540 // sending any above-dust amount would result in a channel reserve violation.
1541 // In this test we check that we would be prevented from sending an HTLC in
1542 // this case.
1543 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1544 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1545 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1546 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1547 let default_config = UserConfig::default();
1548 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1550 let mut push_amt = 100_000_000;
1551 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1553 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
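// nodes[0] (the funder) is left with only the holder-selected reserve plus the
// commitment fee for MIN_AFFORDABLE_HTLC_COUNT HTLCs; everything else is pushed to
// nodes[1] at open. On non-anchor channels the funder pays the commitment fee, so
// nodes[1]'s sends are limited by what nodes[0] can afford on top of its reserve.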
1555 let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1557 // Fetch a route in advance, as we won't be able to fetch one once we're unable to send.
1558 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1559 // Sending exactly enough to hit the reserve amount should be accepted
1560 for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1561 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1564 // However one more HTLC should be significantly over the reserve amount and fail.
1565 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1566 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1567 ), true, APIError::ChannelUnavailable { .. }, {});
1568 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1572 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1573 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1574 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1575 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1576 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1577 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1578 let default_config = UserConfig::default();
1579 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1581 // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1582 // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1583 // transaction fee with 0 HTLCs (183 sats)).
1584 let mut push_amt = 100_000_000;
1585 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1586 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1587 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1589 // Send four HTLCs to cover the initial push_msat buffer we're required to include
1590 for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1591 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1594 let (mut route, payment_hash, _, payment_secret) =
1595 get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1596 route.paths[0].hops[0].fee_msat = 700_000;
1597 // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1598 let secp_ctx = Secp256k1::new();
1599 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1600 let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1601 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1602 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
1603 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1604 700_000, &recipient_onion_fields, cur_height, &None).unwrap();
1605 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1606 let msg = msgs::UpdateAddHTLC {
1607 channel_id: chan.2,
1608 htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
1609 amount_msat: htlc_msat,
1610 payment_hash: payment_hash,
1611 cltv_expiry: htlc_cltv,
1612 onion_routing_packet: onion_packet,
1613 skimmed_fee_msat: None,
1614 blinding_point: None,
1615 };
1617 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1618 // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1619 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
1620 assert_eq!(nodes[0].node.list_channels().len(), 0);
1621 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1622 assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1623 check_added_monitors!(nodes[0], 1);
1624 check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
1625 [nodes[1].node.get_our_node_id()], 100000);
1629 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1630 // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1631 // calculating our commitment transaction fee (this was previously broken).
1632 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1633 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1635 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1636 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1637 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1638 let default_config = UserConfig::default();
1639 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1641 // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1642 // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1643 // transaction fee with 0 HTLCs (183 sats)).
1644 let mut push_amt = 100_000_000;
1645 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1646 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1647 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
1649 let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
1650 + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
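// A received HTLC is dust if its value, less the HTLC-success transaction fee at the
// current feerate, doesn't exceed the dust limit; dust_amt sits 1 msat below that
// threshold, so HTLCs of this size never appear on the commitment transaction.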
1651 // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1652 // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1653 // commitment transaction fee.
1654 route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1656 // Send four HTLCs to cover the initial push_msat buffer we're required to include
1657 for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1658 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1661 // One more than the dust amt should fail, however.
1662 let (mut route, our_payment_hash, _, our_payment_secret) =
1663 get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
1664 route.paths[0].hops[0].fee_msat += 1;
1665 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1666 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1667 ), true, APIError::ChannelUnavailable { .. }, {});
1671 fn test_chan_init_feerate_unaffordability() {
1672 // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
1673 // channel reserve and feerate requirements.
1674 let mut chanmon_cfgs = create_chanmon_cfgs(2);
1675 let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1676 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1677 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1678 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1679 let default_config = UserConfig::default();
1680 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1682 // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
1683 // HTLC to the commitment transaction.
1684 let mut push_amt = 100_000_000;
1685 push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1686 assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
1687 APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
1689 // During open, we don't have a "counterparty channel reserve" to check against, so that
1690 // requirement only comes into play on the open_channel handling side.
1691 push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1692 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
1693 let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
1694 open_channel_msg.push_msat += 1;
1695 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
1697 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1698 assert_eq!(msg_events.len(), 1);
1699 match msg_events[0] {
1700 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
1701 assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
1703 _ => panic!("Unexpected event"),
1708 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1709 // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1710 // calculating our counterparty's commitment transaction fee (this was previously broken).
1711 let chanmon_cfgs = create_chanmon_cfgs(2);
1712 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1713 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1714 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1715 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
1717 let payment_amt = 46000; // Dust amount
1718 // In the previous code, these first four payments would succeed.
1719 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1720 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1721 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1722 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1724 // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1725 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1726 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1727 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1728 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1729 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1731 // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1732 // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1733 // transaction fee and therefore perceived this next payment as a channel reserve violation.
1734 route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1738 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1739 let chanmon_cfgs = create_chanmon_cfgs(3);
1740 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1741 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1742 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1743 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1744 let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
1746 let feemsat = 239;
1747 let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1748 let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1749 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
1750 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
1752 // The 2* and the +1 are for the fee spike reserve.
1753 let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1754 let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1755 let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
1757 // Add a pending HTLC.
1758 let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1759 let payment_event_1 = {
1760 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1761 RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1762 check_added_monitors!(nodes[0], 1);
1764 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1765 assert_eq!(events.len(), 1);
1766 SendEvent::from_event(events.remove(0))
1768 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1770 // Attempt to trigger a channel reserve violation --> payment failure.
1771 let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
1772 let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1773 let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
1774 let mut route_2 = route_1.clone();
1775 route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;
1777 // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1778 let secp_ctx = Secp256k1::new();
1779 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1780 let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
1781 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1782 let recipient_onion_fields = RecipientOnionFields::spontaneous_empty();
1783 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
1784 &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None).unwrap();
1785 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
1786 let msg = msgs::UpdateAddHTLC {
1787 channel_id: chan.2,
1788 htlc_id: 1,
1789 amount_msat: htlc_msat + 1,
1790 payment_hash: our_payment_hash_1,
1791 cltv_expiry: htlc_cltv,
1792 onion_routing_packet: onion_packet,
1793 skimmed_fee_msat: None,
1794 blinding_point: None,
1795 };
1797 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1798 // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1799 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
1800 assert_eq!(nodes[1].node.list_channels().len(), 1);
1801 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1802 assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1803 check_added_monitors!(nodes[1], 1);
1804 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
1805 [nodes[0].node.get_our_node_id()], 100000);
1809 fn test_inbound_outbound_capacity_is_not_zero() {
1810 let chanmon_cfgs = create_chanmon_cfgs(2);
1811 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1812 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1813 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1814 let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1815 let channels0 = node_chanmgrs[0].list_channels();
1816 let channels1 = node_chanmgrs[1].list_channels();
1817 let default_config = UserConfig::default();
1818 assert_eq!(channels0.len(), 1);
1819 assert_eq!(channels1.len(), 1);
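// Each side's usable capacity nets out the reserve: what nodes[1] can send (and
// nodes[0] can receive) is nodes[1]'s 95_000_000 msat balance minus the reserve it
// must maintain, and symmetrically in the other direction.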
1821 let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
1822 assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1823 assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1825 assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1826 assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
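// Test-local mirror of the channel's commitment fee estimate: the commitment tx
// weight (base weight plus, at the time of writing, 172 weight units per non-dust
// HTLC) times the feerate, floored to a whole satoshi and returned in msat. For
// example, at 253 sat/kW with one HTLC on a non-anchor channel (base weight 724):
// (724 + 172) * 253 / 1000 = 226 sats, i.e. 226_000 msat.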
1829 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
1830 (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1834 fn test_channel_reserve_holding_cell_htlcs() {
1835 let chanmon_cfgs = create_chanmon_cfgs(3);
1836 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1837 // When this test was written, the default base fee floated based on the HTLC count.
1838 // It is now fixed, so we simply set the fee to the expected value here.
1839 let mut config = test_default_channel_config();
1840 config.channel_config.forwarding_fee_base_msat = 239;
1841 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1842 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1843 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1844 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1846 let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1847 let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1849 let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1850 let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1852 macro_rules! expect_forward {
1854 let mut events = $node.node.get_and_clear_pending_msg_events();
1855 assert_eq!(events.len(), 1);
1856 check_added_monitors!($node, 1);
1857 let payment_event = SendEvent::from_event(events.remove(0));
1862 let feemsat = 239; // set above
1863 let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1864 let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1865 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
1867 let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1869 // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1871 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1872 .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1873 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
1874 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1875 assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1877 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1878 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1879 ), true, APIError::ChannelUnavailable { .. }, {});
1880 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1883 // The channel reserve is bigger than their_max_htlc_value_in_flight_msat, so loop to deplete
1884 // nodes[0]'s wealth
1886 let amt_msat = recv_value_0 + total_fee_msat;
1887 // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
1888 // Also, make sure each payment is large enough to be above the dust limit so that
1889 // it'll be included in each commit tx fee calculation.
1890 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1891 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1892 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1896 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1897 .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1898 let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
1899 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1900 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1902 let (stat01_, stat11_, stat12_, stat22_) = (
1903 get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1904 get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1905 get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1906 get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1909 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1910 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1911 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1912 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1913 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1916 // Now test channel reserve violations while we have a pending HTLC output in flight.
1917 // The 2* and the +1 HTLC on the commit tx fee below are for the fee spike reserve.
1918 // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
1919 // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
1920 // divide this quantity into 3 portions, that will each be sent in an HTLC. This allows us
1921 // to test channel reserve policy at the edges of what amount is sendable, i.e.
1922 // cases where 1 msat over X amount will cause a payment failure, but anything less than
1923 // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
1924 // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
1925 // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
1926 // calculations.
1927 let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1928 let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
1929 let amt_msat_1 = recv_value_1 + total_fee_msat;
1931 let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1932 let payment_event_1 = {
1933 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1934 RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1935 check_added_monitors!(nodes[0], 1);
1937 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1938 assert_eq!(events.len(), 1);
1939 SendEvent::from_event(events.remove(0))
1941 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1943 // channel reserve test with htlc pending output > 0
1944 let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
1946 let mut route = route_1.clone();
1947 route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
1948 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
1949 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1950 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1951 ), true, APIError::ChannelUnavailable { .. }, {});
1952 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1955 // split the rest to test holding cell
1956 let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1957 let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1958 let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1959 let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
1961 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1962 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1965 // now see if they go through on both sides
1966 let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
1967 // but this one will get stuck in the holding cell
1968 nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
1969 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
1970 check_added_monitors!(nodes[0], 0);
1971 let events = nodes[0].node.get_and_clear_pending_events();
1972 assert_eq!(events.len(), 0);
1974 // test with outbound holding cell amount > 0
1976 let (mut route, our_payment_hash, _, our_payment_secret) =
1977 get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1978 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1979 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1980 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1981 ), true, APIError::ChannelUnavailable { .. }, {});
1982 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1985 let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1986 // this one will also get stuck in the holding cell
1987 nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
1988 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
1989 check_added_monitors!(nodes[0], 0);
1990 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1991 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1993 // flush the pending htlc
1994 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
1995 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1996 check_added_monitors!(nodes[1], 1);
1998 // the pending htlc should be promoted to committed
1999 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
2000 check_added_monitors!(nodes[0], 1);
2001 let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2003 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
2004 let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2005 // No commitment_signed so get_event_msg's assert(len == 1) passes
2006 check_added_monitors!(nodes[0], 1);
2008 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
2009 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2010 check_added_monitors!(nodes[1], 1);
2012 expect_pending_htlcs_forwardable!(nodes[1]);
2014 let ref payment_event_11 = expect_forward!(nodes[1]);
2015 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
2016 commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
2018 expect_pending_htlcs_forwardable!(nodes[2]);
2019 expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
2021 // flush the htlcs in the holding cell
2022 assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
2023 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
2024 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
2025 commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
2026 expect_pending_htlcs_forwardable!(nodes[1]);
2028 let ref payment_event_3 = expect_forward!(nodes[1]);
2029 assert_eq!(payment_event_3.msgs.len(), 2);
2030 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2031 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2033 commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2034 expect_pending_htlcs_forwardable!(nodes[2]);
2036 let events = nodes[2].node.get_and_clear_pending_events();
2037 assert_eq!(events.len(), 2);
2039 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2040 assert_eq!(our_payment_hash_21, *payment_hash);
2041 assert_eq!(recv_value_21, amount_msat);
2042 assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2043 assert_eq!(via_channel_id, Some(chan_2.2));
2045 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2046 assert!(payment_preimage.is_none());
2047 assert_eq!(our_payment_secret_21, *payment_secret);
2049 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2052 _ => panic!("Unexpected event"),
2055 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2056 assert_eq!(our_payment_hash_22, *payment_hash);
2057 assert_eq!(recv_value_22, amount_msat);
2058 assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2059 assert_eq!(via_channel_id, Some(chan_2.2));
2061 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2062 assert!(payment_preimage.is_none());
2063 assert_eq!(our_payment_secret_22, *payment_secret);
2065 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2068 _ => panic!("Unexpected event"),
2071 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2072 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2073 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
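// With every HTLC resolved, the difference between the 2-HTLC and 0-HTLC commitment
// fees is freed up; recv_value_3 spends exactly that surplus (less routing fees),
// landing nodes[0] precisely on its reserve-plus-fee floor, as asserted below.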
2075 let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
2076 let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
2077 send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2079 let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
2080 let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2081 let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2082 assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2083 assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2085 let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2086 assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2090 fn channel_reserve_in_flight_removes() {
2091 // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2092 // can send to its counterparty, but due to update ordering, the other side may not yet have
2093 // considered those HTLCs fully removed.
2094 // This tests that we don't count HTLCs which will not be included in the next remote
2095 // commitment transaction towards the reserve value (as it implies no commitment transaction
2096 // will be generated which violates the remote reserve value).
2097 // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2098 // To test this we:
2099 // * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2100 // you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2101 // you only consider the value of the first HTLC, it may),
2102 // * start routing a third HTLC from A to B,
2103 // * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2104 // the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2105 // * deliver the first fulfill from B
2106 // * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2107 // claim,
2108 // * deliver A's response CS and RAA.
2109 // This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2110 // removed it fully. B now has the push_msat plus the first two HTLCs in value.
2111 // * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2112 // of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2113 let chanmon_cfgs = create_chanmon_cfgs(2);
2114 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2115 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2116 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2117 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2119 let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2120 // Route the first two HTLCs.
2121 let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
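// Sized so that, once claimed, B's balance sits just (10_000 msat) shy of the channel
// reserve value, putting the later reserve checks right at the boundary this test
// exercises.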
2122 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2123 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
2125 // Start routing the third HTLC (this is just used to get everyone in the right state).
2126 let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2127 let send_1 = {
2128 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2129 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2130 check_added_monitors!(nodes[0], 1);
2131 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2132 assert_eq!(events.len(), 1);
2133 SendEvent::from_event(events.remove(0))
2134 };
2136 // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2137 // initial fulfill/CS.
2138 nodes[1].node.claim_funds(payment_preimage_1);
2139 expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2140 check_added_monitors!(nodes[1], 1);
2141 let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2143 // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2144 // remove the second HTLC when we send the HTLC back from B to A.
2145 nodes[1].node.claim_funds(payment_preimage_2);
2146 expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2147 check_added_monitors!(nodes[1], 1);
2148 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2150 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2151 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2152 check_added_monitors!(nodes[0], 1);
2153 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2154 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2156 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2157 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2158 check_added_monitors!(nodes[1], 1);
2159 // B is already AwaitingRAA, so can't generate a CS here
2160 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2162 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2163 check_added_monitors!(nodes[1], 1);
2164 let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2166 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2167 check_added_monitors!(nodes[0], 1);
2168 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2170 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2171 check_added_monitors!(nodes[1], 1);
2172 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2174 // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2175 // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2176 // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2177 // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2178 // on-chain as necessary).
2179 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2180 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2181 check_added_monitors!(nodes[0], 1);
2182 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2183 expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
2185 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2186 check_added_monitors!(nodes[1], 1);
2187 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2189 expect_pending_htlcs_forwardable!(nodes[1]);
2190 expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2192 // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2193 // resolve the second HTLC from A's point of view.
2194 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2195 check_added_monitors!(nodes[0], 1);
2196 expect_payment_path_successful!(nodes[0]);
2197 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2199 // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2200 // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2201 let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2202 let send_2 = {
2203 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2204 RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2205 check_added_monitors!(nodes[1], 1);
2206 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2207 assert_eq!(events.len(), 1);
2208 SendEvent::from_event(events.remove(0))
2209 };
2211 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2212 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2213 check_added_monitors!(nodes[0], 1);
2214 let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2216 // Now just resolve all the outstanding messages/HTLCs for completeness...
2218 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2219 check_added_monitors!(nodes[1], 1);
2220 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2222 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2223 check_added_monitors!(nodes[1], 1);
2225 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2226 check_added_monitors!(nodes[0], 1);
2227 expect_payment_path_successful!(nodes[0]);
2228 let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2230 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2231 check_added_monitors!(nodes[1], 1);
2232 let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2234 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2235 check_added_monitors!(nodes[0], 1);
2237 expect_pending_htlcs_forwardable!(nodes[0]);
2238 expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2240 claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2241 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
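// A hedged aside, not part of the original test: the reserve-limited sizing used at the top of
// `channel_reserve_in_flight_removes` is just `channel_reserve_msat - value_to_self_msat - 10000`.
// The helper below is a hypothetical sketch restating that arithmetic with saturating math so it
// cannot underflow; the test above inlines it directly.
#[allow(unused)]
fn example_reserve_limited_payment_msat(channel_reserve_msat: u64, value_to_self_msat: u64) -> u64 {
	// Mirrors `b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000`,
	// leaving a fixed 10_000 msat buffer relative to the counterparty's reserve requirement.
	channel_reserve_msat.saturating_sub(value_to_self_msat).saturating_sub(10_000)
}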
2245 fn channel_monitor_network_test() {
2246 // Simple test which builds a network of ChannelManagers, connects them to each other, and
2247 // tests that ChannelMonitor is able to recover from various states.
2248 let chanmon_cfgs = create_chanmon_cfgs(5);
2249 let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2250 let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2251 let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2253 // Create some initial channels
2254 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2255 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2256 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2257 let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2259 // Make sure all nodes are at the same starting height
2260 connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2261 connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2262 connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2263 connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2264 connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2266 // Rebalance the network a bit by relaying one payment through all the channels...
2267 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2268 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2269 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2270 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2272 // Simple case with no pending HTLCs:
2273 nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2274 check_added_monitors!(nodes[1], 1);
2275 check_closed_broadcast!(nodes[1], true);
2276 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
2278 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2279 assert_eq!(node_txn.len(), 1);
2280 mine_transaction(&nodes[1], &node_txn[0]);
2281 if nodes[1].connect_style.borrow().updates_best_block_first() {
2282 let _ = nodes[1].tx_broadcaster.txn_broadcast();
2283 }
2285 mine_transaction(&nodes[0], &node_txn[0]);
2286 check_added_monitors!(nodes[0], 1);
2287 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2289 check_closed_broadcast!(nodes[0], true);
2290 assert_eq!(nodes[0].node.list_channels().len(), 0);
2291 assert_eq!(nodes[1].node.list_channels().len(), 1);
2292 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2294 // One pending HTLC is discarded by the force-close:
2295 let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2297 // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2298 // broadcast until we reach the timelock time).
2299 nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2300 check_closed_broadcast!(nodes[1], true);
2301 check_added_monitors!(nodes[1], 1);
2303 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2304 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2305 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2306 mine_transaction(&nodes[2], &node_txn[0]);
2307 check_added_monitors!(nodes[2], 1);
2308 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2310 check_closed_broadcast!(nodes[2], true);
2311 assert_eq!(nodes[1].node.list_channels().len(), 0);
2312 assert_eq!(nodes[2].node.list_channels().len(), 1);
2313 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
2314 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2316 macro_rules! claim_funds {
2317 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2319 $node.node.claim_funds($preimage);
2320 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2321 check_added_monitors!($node, 1);
2323 let events = $node.node.get_and_clear_pending_msg_events();
2324 assert_eq!(events.len(), 1);
2325 match events[0] {
2326 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2327 assert!(update_add_htlcs.is_empty());
2328 assert!(update_fail_htlcs.is_empty());
2329 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2330 },
2331 _ => panic!("Unexpected event"),
2337 // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2338 // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2339 nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2340 check_added_monitors!(nodes[2], 1);
2341 check_closed_broadcast!(nodes[2], true);
2342 let node2_commitment_txid;
2344 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2345 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2346 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2347 node2_commitment_txid = node_txn[0].txid();
2349 // Claim the payment on nodes[3], giving it knowledge of the preimage
2350 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2351 mine_transaction(&nodes[3], &node_txn[0]);
2352 check_added_monitors!(nodes[3], 1);
2353 check_preimage_claim(&nodes[3], &node_txn);
2355 check_closed_broadcast!(nodes[3], true);
2356 assert_eq!(nodes[2].node.list_channels().len(), 0);
2357 assert_eq!(nodes[3].node.list_channels().len(), 1);
2358 check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
2359 check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2361 // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2362 // confusing us in the following tests.
2363 let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2365 // One pending HTLC to time out:
2366 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2367 // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2370 let (close_chan_update_1, close_chan_update_2) = {
2371 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2372 let events = nodes[3].node.get_and_clear_pending_msg_events();
2373 assert_eq!(events.len(), 2);
2374 let close_chan_update_1 = match events[1] {
2375 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2376 msg.clone()
2377 },
2378 _ => panic!("Unexpected event"),
2379 };
2380 match events[0] {
2381 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2382 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2383 },
2384 _ => panic!("Unexpected event"),
2385 }
2386 check_added_monitors!(nodes[3], 1);
2388 // Clear the bumped claiming txn spending the node 2 commitment tx. Bumped txn are generated once a height-based timer fires.
2389 {
2390 let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2391 node_txn.retain(|tx| {
2392 if tx.input[0].previous_output.txid == node2_commitment_txid {
2393 false
2394 } else { true }
2395 });
2396 }
2398 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2400 // Claim the payment on nodes[4], giving it knowledge of the preimage
2401 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2403 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2404 let events = nodes[4].node.get_and_clear_pending_msg_events();
2405 assert_eq!(events.len(), 2);
2406 let close_chan_update_2 = match events[1] {
2407 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2408 msg.clone()
2409 },
2410 _ => panic!("Unexpected event"),
2411 };
2412 match events[0] {
2413 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2414 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2415 },
2416 _ => panic!("Unexpected event"),
2417 }
2418 check_added_monitors!(nodes[4], 1);
2419 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2420 check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
2422 mine_transaction(&nodes[4], &node_txn[0]);
2423 check_preimage_claim(&nodes[4], &node_txn);
2424 (close_chan_update_1, close_chan_update_2)
2425 };
2426 nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2427 nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2428 assert_eq!(nodes[3].node.list_channels().len(), 0);
2429 assert_eq!(nodes[4].node.list_channels().len(), 0);
2431 assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2432 Ok(ChannelMonitorUpdateStatus::Completed));
2433 check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
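// A hedged aside on the block counts used above: an HTLC-Timeout only becomes broadcastable once
// the HTLC's CLTV expiry (TEST_FINAL_CLTV plus one hop's MIN_CLTV_EXPIRY_DELTA here) plus our
// LATENCY_GRACE_PERIOD_BLOCKS has passed. This hypothetical helper restates the arithmetic the
// test inlines in its `connect_blocks` calls; the real trigger logic lives in the ChannelMonitor.
#[allow(unused)]
fn example_blocks_until_timeout_broadcast(final_cltv: u32) -> u32 {
	// The extra block gets us past, rather than exactly to, the grace-period boundary.
	final_cltv + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1
}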
2437 fn test_justice_tx_htlc_timeout() {
2438 // Test justice txn built on revoked HTLC-Timeout tx, against both sides
2439 let mut alice_config = test_default_channel_config();
2440 alice_config.channel_handshake_config.announced_channel = true;
2441 alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2442 alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2443 let mut bob_config = test_default_channel_config();
2444 bob_config.channel_handshake_config.announced_channel = true;
2445 bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2446 bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2447 let user_cfgs = [Some(alice_config), Some(bob_config)];
2448 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2449 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2450 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2451 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2452 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2453 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2454 // Create some new channels:
2455 let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2457 // A pending HTLC which will be revoked:
2458 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2459 // Get the will-be-revoked local txn from nodes[0]
2460 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2461 assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2462 assert_eq!(revoked_local_txn[0].input.len(), 1);
2463 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2464 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only the HTLC output and the output back to nodes[0] are present
2465 assert_eq!(revoked_local_txn[1].input.len(), 1);
2466 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2467 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2468 // Revoke the old state
2469 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2472 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2474 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2475 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2476 assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2477 check_spends!(node_txn[0], revoked_local_txn[0]);
2478 node_txn.swap_remove(0);
2480 check_added_monitors!(nodes[1], 1);
2481 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2482 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2484 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2485 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2486 // Verify broadcast of revoked HTLC-timeout
2487 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2488 check_added_monitors!(nodes[0], 1);
2489 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2490 // Broadcast revoked HTLC-timeout on node 1
2491 mine_transaction(&nodes[1], &node_txn[1]);
2492 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2494 get_announce_close_broadcast_events(&nodes, 0, 1);
2495 assert_eq!(nodes[0].node.list_channels().len(), 0);
2496 assert_eq!(nodes[1].node.list_channels().len(), 0);
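// A hedged aside: the justice-tx checks above all rest on one invariant, namely that every input
// of the penalty transaction spends the revoked transaction being punished. This hypothetical
// helper states that invariant directly; the tests express it via `check_spends!`.
#[allow(unused)]
fn example_assert_spends_only(penalty_tx: &Transaction, revoked_tx: &Transaction) {
	let revoked_txid = revoked_tx.txid();
	for input in penalty_tx.input.iter() {
		// Each penalty input must point back at an output of the revoked transaction.
		assert_eq!(input.previous_output.txid, revoked_txid);
	}
}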
2500 fn test_justice_tx_htlc_success() {
2501 // Test justice txn built on revoked HTLC-Success tx, against both sides
2502 let mut alice_config = test_default_channel_config();
2503 alice_config.channel_handshake_config.announced_channel = true;
2504 alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2505 alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2506 let mut bob_config = test_default_channel_config();
2507 bob_config.channel_handshake_config.announced_channel = true;
2508 bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2509 bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2510 let user_cfgs = [Some(alice_config), Some(bob_config)];
2511 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2512 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2513 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2514 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2515 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2516 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2517 // Create some new channels:
2518 let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2520 // A pending HTLC which will be revoked:
2521 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2522 // Get the will-be-revoked local txn from B
2523 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2524 assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2525 assert_eq!(revoked_local_txn[0].input.len(), 1);
2526 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2527 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2528 // Revoke the old state
2529 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2531 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2533 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2534 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2535 assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2537 check_spends!(node_txn[0], revoked_local_txn[0]);
2538 node_txn.swap_remove(0);
2540 check_added_monitors!(nodes[0], 1);
2541 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2543 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2544 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2545 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2546 check_added_monitors!(nodes[1], 1);
2547 mine_transaction(&nodes[0], &node_txn[1]);
2548 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2549 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2551 get_announce_close_broadcast_events(&nodes, 0, 1);
2552 assert_eq!(nodes[0].node.list_channels().len(), 0);
2553 assert_eq!(nodes[1].node.list_channels().len(), 0);
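// A hedged aside: the `our_to_self_delay` values configured in both justice-tx tests are block
// counts derived from days at the usual ~6 blocks/hour. This hypothetical helper just names that
// conversion (6 * 24 * 5 = 720 blocks for Alice, 6 * 24 * 3 = 432 blocks for Bob above).
#[allow(unused)]
fn example_to_self_delay_blocks(days: u16) -> u16 {
	6 * 24 * days
}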
2557 fn revoked_output_claim() {
2558 // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2559 // transaction is broadcast by its counterparty
2560 let chanmon_cfgs = create_chanmon_cfgs(2);
2561 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2562 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2563 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2564 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2565 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output
2566 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2567 assert_eq!(revoked_local_txn.len(), 1);
2568 // Only output is the full channel value back to nodes[0]:
2569 assert_eq!(revoked_local_txn[0].output.len(), 1);
2570 // Send a payment through, updating everyone's latest commitment txn
2571 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2573 // Inform nodes[1] that nodes[0] broadcast a stale tx
2574 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2575 check_added_monitors!(nodes[1], 1);
2576 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2577 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2578 assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2580 check_spends!(node_txn[0], revoked_local_txn[0]);
2582 // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2583 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2584 get_announce_close_broadcast_events(&nodes, 0, 1);
2585 check_added_monitors!(nodes[0], 1);
2586 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2590 fn test_forming_justice_tx_from_monitor_updates() {
2591 do_test_forming_justice_tx_from_monitor_updates(true);
2592 do_test_forming_justice_tx_from_monitor_updates(false);
2595 fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
2596 // Simple test to make sure that the justice tx formed in WatchtowerPersister
2597 // is properly formed and can be broadcasted/confirmed successfully in the event
2598 // that a revoked commitment transaction is broadcasted
2599 // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually)
2600 let chanmon_cfgs = create_chanmon_cfgs(2);
2601 let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap();
2602 let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap();
2603 let persisters = vec![WatchtowerPersister::new(destination_script0),
2604 WatchtowerPersister::new(destination_script1)];
2605 let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
2606 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2607 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2608 let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2609 let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
2611 if !broadcast_initial_commitment {
2612 // Send a payment to move the channel forward
2613 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2616 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output.
2617 // We'll keep this commitment transaction to broadcast once it's revoked.
2618 let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
2619 assert_eq!(revoked_local_txn.len(), 1);
2620 let revoked_commitment_tx = &revoked_local_txn[0];
2622 // Send another payment, now revoking the previous commitment tx
2623 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2625 let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2626 check_spends!(justice_tx, revoked_commitment_tx);
2628 mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2629 mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2631 check_added_monitors!(nodes[1], 1);
2632 check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2633 &[nodes[0].node.get_our_node_id()], 100_000);
2634 get_announce_close_broadcast_events(&nodes, 1, 0);
2636 check_added_monitors!(nodes[0], 1);
2637 check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2638 &[nodes[1].node.get_our_node_id()], 100_000);
2640 // Check that the justice tx has sent the revoked output value to nodes[1]
2641 let monitor = get_monitor!(nodes[1], channel_id);
2642 let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
2643 match balance {
2644 channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2645 _ => panic!("Unexpected balance type"),
2646 }
2647 });
2648 // On the first commitment, nodes[1]'s balance was below dust so it didn't have an output
2649 let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
2650 let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
2651 assert_eq!(total_claimable_balance, expected_claimable_balance);
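// A hedged aside: the balance check above folds every `Balance::ClaimableAwaitingConfirmations`
// into one total and panics on any other variant. This hypothetical helper restates that fold
// over a plain slice of balances, exactly as the test inlines it.
#[allow(unused)]
fn example_total_awaiting_confirmations(balances: &[channelmonitor::Balance]) -> u64 {
	balances.iter().fold(0, |sum, balance| match balance {
		channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
		_ => panic!("Unexpected balance type"),
	})
}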
2656 fn claim_htlc_outputs_shared_tx() {
2657 // Node revoked its old state; the HTLCs haven't timed out yet, so we claim them in a shared justice tx
2658 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2659 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2660 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2661 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2662 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2664 // Create some new channel:
2665 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2667 // Rebalance the network to generate HTLCs in both directions
2668 send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2669 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
2670 let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2671 let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2673 // Get the will-be-revoked local txn from node[0]
2674 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2675 assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2676 assert_eq!(revoked_local_txn[0].input.len(), 1);
2677 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2678 assert_eq!(revoked_local_txn[1].input.len(), 1);
2679 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2680 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2681 check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2683 // Revoke the old state
2684 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2687 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2688 check_added_monitors!(nodes[0], 1);
2689 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2690 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2691 check_added_monitors!(nodes[1], 1);
2692 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2693 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2694 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2696 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2697 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2699 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2700 check_spends!(node_txn[0], revoked_local_txn[0]);
2702 let mut witness_lens = BTreeSet::new();
2703 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2704 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2705 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2706 assert_eq!(witness_lens.len(), 3);
2707 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2708 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2709 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2711 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2712 // ANTI_REORG_DELAY confirmations.
2713 mine_transaction(&nodes[1], &node_txn[0]);
2714 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2715 expect_payment_failed!(nodes[1], payment_hash_2, false);
2717 get_announce_close_broadcast_events(&nodes, 0, 1);
2718 assert_eq!(nodes[0].node.list_channels().len(), 0);
2719 assert_eq!(nodes[1].node.list_channels().len(), 0);
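// A hedged aside: this test and `claim_htlc_outputs_single_tx` below classify justice inputs by
// the length of the last witness element (77 bytes for the revoked to_local script, plus the
// offered/accepted HTLC script weights). This hypothetical helper collects those lengths the
// same way the tests do with their inline BTreeSet.
#[allow(unused)]
fn example_witness_script_lens<'a>(inputs: impl Iterator<Item = &'a TxIn>) -> BTreeSet<usize> {
	inputs.map(|txin| txin.witness.last().unwrap().len()).collect()
}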
2723 fn claim_htlc_outputs_single_tx() {
2724 // Node revoked its old state; the HTLCs have timed out, so we claim each of them in a separate justice tx
2725 let mut chanmon_cfgs = create_chanmon_cfgs(2);
2726 chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2727 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2728 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2729 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2731 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2733 // Rebalance the network to generate HTLCs in both directions
2734 send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2735 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
2736 // time as separate claim transactions, as the HTLCs will have timed out given a high current height
2737 let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2738 let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2740 // Get the will-be-revoked local txn from node[0]
2741 let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2743 // Revoke the old state
2744 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2747 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2748 check_added_monitors!(nodes[0], 1);
2749 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2750 check_added_monitors!(nodes[1], 1);
2751 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2752 let mut events = nodes[0].node.get_and_clear_pending_events();
2753 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
2754 match events.last().unwrap() {
2755 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2756 _ => panic!("Unexpected event"),
2757 }
2759 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2760 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2762 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2764 // Check the pair of local commitment and HTLC-Timeout transactions broadcast due to HTLC expiration
2765 assert_eq!(node_txn[0].input.len(), 1);
2766 check_spends!(node_txn[0], chan_1.3);
2767 assert_eq!(node_txn[1].input.len(), 1);
2768 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2769 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered HTLC output
2770 check_spends!(node_txn[1], node_txn[0]);
2772 // Filter out any non-justice transactions.
2773 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2774 assert!(node_txn.len() > 3);
2776 assert_eq!(node_txn[0].input.len(), 1);
2777 assert_eq!(node_txn[1].input.len(), 1);
2778 assert_eq!(node_txn[2].input.len(), 1);
2780 check_spends!(node_txn[0], revoked_local_txn[0]);
2781 check_spends!(node_txn[1], revoked_local_txn[0]);
2782 check_spends!(node_txn[2], revoked_local_txn[0]);
2784 let mut witness_lens = BTreeSet::new();
2785 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2786 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2787 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2788 assert_eq!(witness_lens.len(), 3);
2789 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2790 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2791 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2793 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2794 // ANTI_REORG_DELAY confirmations.
2795 mine_transaction(&nodes[1], &node_txn[0]);
2796 mine_transaction(&nodes[1], &node_txn[1]);
2797 mine_transaction(&nodes[1], &node_txn[2]);
2798 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2799 expect_payment_failed!(nodes[1], payment_hash_2, false);
2801 get_announce_close_broadcast_events(&nodes, 0, 1);
2802 assert_eq!(nodes[0].node.list_channels().len(), 0);
2803 assert_eq!(nodes[1].node.list_channels().len(), 0);
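// A hedged aside: the retain-based filter above keeps only the claims that spend the revoked
// commitment. This hypothetical helper is the same filter as a standalone function.
#[allow(unused)]
fn example_filter_justice_txn(mut txn: Vec<Transaction>, revoked_txid: bitcoin::hash_types::Txid) -> Vec<Transaction> {
	// Keep a transaction only if its first input spends the revoked commitment tx.
	txn.retain(|tx| tx.input[0].previous_output.txid == revoked_txid);
	txn
}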
2807 fn test_htlc_on_chain_success() {
2808 // Test that in case of a unilateral close onchain, we detect the state of the output and pass
2809 // the preimage backward accordingly. So here we test that the ChannelManager is
2810 // broadcasting the right event to the other nodes in the payment path.
2811 // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2812 // A --------------------> B ----------------------> C (preimage)
2813 // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2814 // commitment transaction was broadcast.
2815 // Then, B should learn the preimage from said transactions, attempting to claim backwards
2817 // B should be able to claim via preimage if A then broadcasts its local tx.
2818 // Finally, when A sees B's latest local commitment transaction it should be able to claim
2819 // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2820 // PaymentSent event).
2822 let chanmon_cfgs = create_chanmon_cfgs(3);
2823 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2824 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2825 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2827 // Create some initial channels
2828 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2829 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2831 // Ensure all nodes are at the same height
2832 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2833 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2834 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2835 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2837 // Rebalance the network a bit by relaying one payment through all the channels...
2838 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2839 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2841 let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2842 let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2844 // Broadcast legit commitment tx from C on B's chain
2845 // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2846 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2847 assert_eq!(commitment_tx.len(), 1);
2848 check_spends!(commitment_tx[0], chan_2.3);
2849 nodes[2].node.claim_funds(our_payment_preimage);
2850 expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2851 nodes[2].node.claim_funds(our_payment_preimage_2);
2852 expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2853 check_added_monitors!(nodes[2], 2);
2854 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2855 assert!(updates.update_add_htlcs.is_empty());
2856 assert!(updates.update_fail_htlcs.is_empty());
2857 assert!(updates.update_fail_malformed_htlcs.is_empty());
2858 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2860 mine_transaction(&nodes[2], &commitment_tx[0]);
2861 check_closed_broadcast!(nodes[2], true);
2862 check_added_monitors!(nodes[2], 1);
2863 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2864 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2865 assert_eq!(node_txn.len(), 2);
2866 check_spends!(node_txn[0], commitment_tx[0]);
2867 check_spends!(node_txn[1], commitment_tx[0]);
2868 assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2869 assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2870 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2871 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2872 assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
2873 assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
2875 // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2876 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2877 connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2879 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2880 assert_eq!(added_monitors.len(), 1);
2881 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2882 added_monitors.clear();
2884 let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2885 assert_eq!(forwarded_events.len(), 3);
2886 match forwarded_events[0] {
2887 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2888 _ => panic!("Unexpected event"),
2889 }
2890 let chan_id = Some(chan_1.2);
2891 match forwarded_events[1] {
2892 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2893 next_channel_id, outbound_amount_forwarded_msat, ..
2894 } => {
2895 assert_eq!(total_fee_earned_msat, Some(1000));
2896 assert_eq!(prev_channel_id, chan_id);
2897 assert_eq!(claim_from_onchain_tx, true);
2898 assert_eq!(next_channel_id, Some(chan_2.2));
2899 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2900 },
2901 _ => panic!("Unexpected event"),
2902 }
2903 match forwarded_events[2] {
2904 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2905 next_channel_id, outbound_amount_forwarded_msat, ..
2906 } => {
2907 assert_eq!(total_fee_earned_msat, Some(1000));
2908 assert_eq!(prev_channel_id, chan_id);
2909 assert_eq!(claim_from_onchain_tx, true);
2910 assert_eq!(next_channel_id, Some(chan_2.2));
2911 assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2912 },
2913 _ => panic!("Unexpected event"),
2914 }
2915 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2917 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2918 assert_eq!(added_monitors.len(), 2);
2919 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2920 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2921 added_monitors.clear();
2923 assert_eq!(events.len(), 3);
2925 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2926 let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2928 match nodes_2_event {
2929 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
2930 _ => panic!("Unexpected event"),
2933 match nodes_0_event {
2934 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2935 assert!(update_add_htlcs.is_empty());
2936 assert!(update_fail_htlcs.is_empty());
2937 assert_eq!(update_fulfill_htlcs.len(), 1);
2938 assert!(update_fail_malformed_htlcs.is_empty());
2939 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2941 _ => panic!("Unexpected event"),
2944 // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2945 match events[0] {
2946 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2947 _ => panic!("Unexpected event"),
2948 }
2950 macro_rules! check_tx_local_broadcast {
2951 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2952 let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2953 assert_eq!(node_txn.len(), 2);
2954 // Node[1]: 2 * HTLC-timeout tx
2955 // Node[0]: 2 * HTLC-timeout tx
2956 check_spends!(node_txn[0], $commitment_tx);
2957 check_spends!(node_txn[1], $commitment_tx);
2958 assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
2959 assert_ne!(node_txn[1].lock_time, LockTime::ZERO);
2960 if $htlc_offered {
2961 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2962 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2963 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2964 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2965 } else {
2966 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2967 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2968 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2969 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2970 }
2971 node_txn.clear();
2972 } }
2973 }
2974 // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
2975 check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
2977 // Broadcast legit commitment tx from A on B's chain
2978 // Broadcast preimage tx by B on offered output from A commitment tx on A's chain
2979 let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2980 check_spends!(node_a_commitment_tx[0], chan_1.3);
2981 mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2982 check_closed_broadcast!(nodes[1], true);
2983 check_added_monitors!(nodes[1], 1);
2984 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2985 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2986 assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
2987 let commitment_spend =
2988 if node_txn.len() == 1 {
2989 &node_txn[0]
2990 } else {
2991 // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast.
2992 // FullBlockViaListen
2993 if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2994 check_spends!(node_txn[1], commitment_tx[0]);
2995 check_spends!(node_txn[2], commitment_tx[0]);
2996 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2997 &node_txn[0]
2998 } else {
2999 check_spends!(node_txn[0], commitment_tx[0]);
3000 check_spends!(node_txn[1], commitment_tx[0]);
3001 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
3002 &node_txn[2]
3003 }
3004 };
3006 check_spends!(commitment_spend, node_a_commitment_tx[0]);
3007 assert_eq!(commitment_spend.input.len(), 2);
3008 assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3009 assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3010 assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
3011 assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3012 // We don't bother to check that B can claim the HTLC output on its commitment tx here as
3013 // we already checked the same situation with A.
3015 // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
3016 connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
3017 connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3018 check_closed_broadcast!(nodes[0], true);
3019 check_added_monitors!(nodes[0], 1);
3020 let events = nodes[0].node.get_and_clear_pending_events();
3021 assert_eq!(events.len(), 5);
3022 let mut first_claimed = false;
3023 for event in events {
3024 match event {
3025 Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3026 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
3027 assert!(!first_claimed);
3028 first_claimed = true;
3029 } else {
3030 assert_eq!(payment_preimage, our_payment_preimage_2);
3031 assert_eq!(payment_hash, payment_hash_2);
3032 }
3033 },
3034 Event::PaymentPathSuccessful { .. } => {},
3035 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3036 _ => panic!("Unexpected event"),
3037 }
3038 }
3039 check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
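// A hedged aside: the assertions above tell HTLC-Success claims apart from HTLC-Timeout claims
// structurally. For the non-anchor HTLC transactions in this test, HTLC-Success txn carry a zero
// locktime while HTLC-Timeout txn are locked until the HTLC's CLTV expiry; this hypothetical
// helper names that distinction.
#[allow(unused)]
fn example_is_htlc_success_shaped(tx: &Transaction) -> bool {
	tx.lock_time == LockTime::ZERO
}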
3042 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3043 // Test that in case of a unilateral close onchain, we detect the state of the output and
3044 // time out the HTLC backward accordingly. So here we test that the ChannelManager is
3045 // broadcasting the right event to the other nodes in the payment path.
3046 // A ------------------> B ----------------------> C (timeout)
3047 //    B's commitment tx                C's commitment tx
3049 //    B's HTLC-timeout tx              B's timeout tx
3051 let chanmon_cfgs = create_chanmon_cfgs(3);
3052 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3053 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3054 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3055 *nodes[0].connect_style.borrow_mut() = connect_style;
3056 *nodes[1].connect_style.borrow_mut() = connect_style;
3057 *nodes[2].connect_style.borrow_mut() = connect_style;
3059 // Create some initial channels
3060 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3061 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3063 // Rebalance the network a bit by relaying one payment through all the channels...
3064 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3065 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3067 let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
3069 // Broadcast legit commitment tx from C on B's chain
3070 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3071 check_spends!(commitment_tx[0], chan_2.3);
3072 nodes[2].node.fail_htlc_backwards(&payment_hash);
3073 check_added_monitors!(nodes[2], 0);
3074 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3075 check_added_monitors!(nodes[2], 1);
3077 let events = nodes[2].node.get_and_clear_pending_msg_events();
3078 assert_eq!(events.len(), 1);
3079 match events[0] {
3080 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3081 assert!(update_add_htlcs.is_empty());
3082 assert!(!update_fail_htlcs.is_empty());
3083 assert!(update_fulfill_htlcs.is_empty());
3084 assert!(update_fail_malformed_htlcs.is_empty());
3085 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3086 },
3087 _ => panic!("Unexpected event"),
3088 }
3089 mine_transaction(&nodes[2], &commitment_tx[0]);
3090 check_closed_broadcast!(nodes[2], true);
3091 check_added_monitors!(nodes[2], 1);
3092 check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3093 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3094 assert_eq!(node_txn.len(), 0);
3096 // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3097 // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fail it backward accordingly
3098 mine_transaction(&nodes[1], &commitment_tx[0]);
3099 check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
3100 [nodes[2].node.get_our_node_id()], 100000);
3101 connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3102 let timeout_tx = {
3103 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3104 if nodes[1].connect_style.borrow().skips_blocks() {
3105 assert_eq!(txn.len(), 1);
3106 } else {
3107 assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3108 }
3109 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3110 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3111 txn.remove(0)
3112 };
3114 mine_transaction(&nodes[1], &timeout_tx);
3115 check_added_monitors!(nodes[1], 1);
3116 check_closed_broadcast!(nodes[1], true);
3118 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3120 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3121 check_added_monitors!(nodes[1], 1);
3122 let events = nodes[1].node.get_and_clear_pending_msg_events();
3123 assert_eq!(events.len(), 1);
3124 match events[0] {
3125 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3126 assert!(update_add_htlcs.is_empty());
3127 assert!(!update_fail_htlcs.is_empty());
3128 assert!(update_fulfill_htlcs.is_empty());
3129 assert!(update_fail_malformed_htlcs.is_empty());
3130 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3131 },
3132 _ => panic!("Unexpected event"),
3133 }
3135 // Broadcast legit commitment tx from B on A's chain
3136 let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3137 check_spends!(commitment_tx[0], chan_1.3);
3139 mine_transaction(&nodes[0], &commitment_tx[0]);
3140 connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3142 check_closed_broadcast!(nodes[0], true);
3143 check_added_monitors!(nodes[0], 1);
3144 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3145 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3146 assert_eq!(node_txn.len(), 1);
3147 check_spends!(node_txn[0], commitment_tx[0]);
3148 assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3152 fn test_htlc_on_chain_timeout() {
3153 do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3154 do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3155 do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
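// A hedged aside: `do_test_htlc_on_chain_timeout` runs under several `ConnectStyle`s because the
// connection style changes how many fee-bumped copies of the timeout claim land in the broadcast
// set. This hypothetical helper restates the expectation asserted above: styles that skip
// intermediate blocks fire the claim once, while the others rebroadcast with two extra fee bumps.
#[allow(unused)]
fn example_expected_timeout_broadcasts(skips_blocks: bool) -> usize {
	if skips_blocks { 1 } else { 3 }
}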
3159 fn test_simple_commitment_revoked_fail_backward() {
3160 // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
3161 // and fail backward accordingly.
3163 let chanmon_cfgs = create_chanmon_cfgs(3);
3164 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3165 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3166 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3168 // Create some initial channels
3169 create_announced_chan_between_nodes(&nodes, 0, 1);
3170 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3172 let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3173 // Get the will-be-revoked local txn from nodes[2]
3174 let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3175 // Revoke the old state
3176 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3178 let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3180 mine_transaction(&nodes[1], &revoked_local_txn[0]);
3181 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3182 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3183 check_added_monitors!(nodes[1], 1);
3184 check_closed_broadcast!(nodes[1], true);
3186 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3187 check_added_monitors!(nodes[1], 1);
3188 let events = nodes[1].node.get_and_clear_pending_msg_events();
3189 assert_eq!(events.len(), 1);
3190 match events[0] {
3191 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3192 assert!(update_add_htlcs.is_empty());
3193 assert_eq!(update_fail_htlcs.len(), 1);
3194 assert!(update_fulfill_htlcs.is_empty());
3195 assert!(update_fail_malformed_htlcs.is_empty());
3196 assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3198 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3199 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3200 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3201 },
3202 _ => panic!("Unexpected event"),
3206 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3207 // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3208 // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3209 // commitment transaction anymore.
3210 // To do this, we have the peer which will broadcast a revoked commitment transaction send
3211 // a number of update_fail/commitment_signed updates without ever sending the RAA in
3212 // response to our commitment_signed. This is somewhat misbehavior-y, though not
3213 // technically disallowed and we should probably handle it reasonably.
3214 // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3215 // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3216 // transactions:
3217 // * Once we move it out of our holding cell/add it, we will immediately include it in a
3218 // commitment_signed (implying it will be in the latest remote commitment transaction).
3219 // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3220 // and once they revoke the previous commitment transaction (allowing us to send a new
3221 // commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3222 let chanmon_cfgs = create_chanmon_cfgs(3);
3223 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3224 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3225 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3227 // Create some initial channels
3228 create_announced_chan_between_nodes(&nodes, 0, 1);
3229 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3231 let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3232 // Get the will-be-revoked local txn from nodes[2]
3233 let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3234 assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3235 // Revoke the old state
3236 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3238 let value = if use_dust {
3239 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3240 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3241 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3242 .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
3243 } else { 3000000 };
3245 let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3246 let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3247 let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
	nodes[2].node.fail_htlc_backwards(&first_payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
	check_added_monitors!(nodes[2], 1);
	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
	let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
	// Drop the last RAA from 3 -> 2
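	// (The extra trailing flags on commitment_signed_dance! above appear to make it return
	// nodes[2]'s final revoke_and_ack as `bs_raa` instead of delivering it, so we can withhold
	// it here and optionally deliver it later depending on `deliver_bs_raa`.)
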
	nodes[2].node.fail_htlc_backwards(&second_payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
	check_added_monitors!(nodes[2], 1);
	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	// Note that nodes[1] is in AwaitingRAA, so won't send a CS
	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[2], 1);

	nodes[2].node.fail_htlc_backwards(&third_payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
	check_added_monitors!(nodes[2], 1);
	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
	// At this point first_payment_hash has dropped out of the latest two commitment
	// transactions that nodes[1] is tracking...
	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	// Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[2], 1);

	// Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
	// on nodes[2]'s RAA.
	let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
	nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
		RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 0);

	if deliver_bs_raa {
		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
		// One monitor for the new revocation preimage, no second one as we won't generate a new
		// commitment transaction for nodes[0] until process_pending_htlc_forwards().
		check_added_monitors!(nodes[1], 1);
		let events = nodes[1].node.get_and_clear_pending_events();
		assert_eq!(events.len(), 2);
		match events[0] {
			Event::HTLCHandlingFailed { .. } => { },
			_ => panic!("Unexpected event"),
		}
		match events[1] {
			Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event"),
		}
		// Deliberately don't process the pending fail-back so they all fail back at once after
		// block connection just like the !deliver_bs_raa case
	}

	let mut failed_htlcs = new_hash_set();
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	mine_transaction(&nodes[1], &revoked_local_txn[0]);
	check_added_monitors!(nodes[1], 1);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
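	// ANTI_REORG_DELAY is the number of confirmations LDK waits before treating an on-chain
	// resolution as final; only once the revoked commitment is that deep do we fail the HTLCs
	// backwards, hence connecting ANTI_REORG_DELAY - 1 further blocks above.
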
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
	assert!(events.iter().any(|ev| matches!(
		ev,
		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
	)));
	assert!(events.iter().any(|ev| matches!(
		ev,
		Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
	)));
	assert!(events.iter().any(|ev| matches!(
		ev,
		Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
	)));

	nodes[1].node.process_pending_htlc_forwards();
	check_added_monitors!(nodes[1], 1);

	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });

	if deliver_bs_raa {
		let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
		match nodes_2_event {
			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
				assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
				assert_eq!(update_add_htlcs.len(), 1);
				assert!(update_fulfill_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
			},
			_ => panic!("Unexpected event"),
		}
	}

	let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
	match nodes_2_event {
		MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
			assert_eq!(channel_id, chan_2.2);
			assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
		},
		_ => panic!("Unexpected event"),
	}

	let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
	match nodes_0_event {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
			assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fail_htlcs.len(), 3);
			assert!(update_fulfill_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert_eq!(nodes[0].node.get_our_node_id(), *node_id);

			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);

			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);

			let events = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events.len(), 6);
			match events[0] {
				Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
					assert!(failed_htlcs.insert(payment_hash.0));
					// If we delivered B's RAA we got an unknown preimage error, not something
					// that we should update our routing table for.
					if !deliver_bs_raa {
						if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
					}
				},
				_ => panic!("Unexpected event"),
			}
			match events[1] {
				Event::PaymentFailed { ref payment_hash, .. } => {
					assert_eq!(*payment_hash, first_payment_hash);
				},
				_ => panic!("Unexpected event"),
			}
			match events[2] {
				Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
					assert!(failed_htlcs.insert(payment_hash.0));
				},
				_ => panic!("Unexpected event"),
			}
			match events[3] {
				Event::PaymentFailed { ref payment_hash, .. } => {
					assert_eq!(*payment_hash, second_payment_hash);
				},
				_ => panic!("Unexpected event"),
			}
			match events[4] {
				Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
					assert!(failed_htlcs.insert(payment_hash.0));
				},
				_ => panic!("Unexpected event"),
			}
			match events[5] {
				Event::PaymentFailed { ref payment_hash, .. } => {
					assert_eq!(*payment_hash, third_payment_hash);
				},
				_ => panic!("Unexpected event"),
			}
		},
		_ => panic!("Unexpected event"),
	}

	// Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
	match events[0] {
		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
		_ => panic!("Unexpected event"),
	}

	assert!(failed_htlcs.contains(&first_payment_hash.0));
	assert!(failed_htlcs.contains(&second_payment_hash.0));
	assert!(failed_htlcs.contains(&third_payment_hash.0));
}

#[test]
fn test_commitment_revoked_fail_backward_exhaustive_a() {
	do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
	do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
	do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
	do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
}

#[test]
fn test_commitment_revoked_fail_backward_exhaustive_b() {
	do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
	do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
	do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
	do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
}

#[test]
fn fail_backward_pending_htlc_upon_channel_failure() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);

	// Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
	{
		let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
		nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
			PaymentId(payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let payment_event = {
			let mut events = nodes[0].node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			SendEvent::from_event(events.remove(0))
		};
		assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
		assert_eq!(payment_event.msgs.len(), 1);
	}

	// Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
	let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
	{
		nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
			RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 0);

		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	// Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
	{
		let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);

		let secp_ctx = Secp256k1::new();
		let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
		let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
		let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
		let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
			&route.paths[0], 50_000, &recipient_onion_fields, current_height, &None).unwrap();
		let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
		let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();

		// Send a 0-msat update_add_htlc to fail the channel.
		let update_add_htlc = msgs::UpdateAddHTLC {
			channel_id: chan.2,
			htlc_id: 0,
			amount_msat: 0,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
	}
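	// BOLT 2 requires update_add_htlc's amount_msat to be greater than 0, and the recipient
	// should fail the channel otherwise; LDK treats a 0-msat HTLC as a hard protocol violation
	// and closes the channel, which is what the events asserted below reflect.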
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 3);
	// Check that Alice fails backward the pending HTLC from the second payment.
	match events[0] {
		Event::PaymentPathFailed { payment_hash, .. } => {
			assert_eq!(payment_hash, failed_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentFailed { payment_hash, .. } => {
			assert_eq!(payment_hash, failed_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}
	match events[2] {
		Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
			assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
		},
		_ => panic!("Unexpected event {:?}", events[2]),
	}
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
}

#[test]
fn test_htlc_ignore_latest_remote_commitment() {
	// Test that HTLC transactions spending the latest remote commitment transaction are simply
	// ignored if we cannot claim them. This originally tickled an invalid unwrap().
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
		// We rely on the ability to connect a block redundantly, which isn't allowed via
		// `chain::Listen`, so we never run the test if we randomly get assigned that
		// connect_style.
		return;
	}
	let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;

	route_payment(&nodes[0], &[&nodes[1]], 10000000);
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
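	// Connect enough blocks that the routed HTLC times out on-chain, so nodes[0]'s
	// ChannelMonitor broadcasts both its commitment transaction and the HTLC-timeout claim
	// checked below.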
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);

	let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
	assert_eq!(node_txn.len(), 2);
	check_spends!(node_txn[0], funding_tx);
	check_spends!(node_txn[1], node_txn[0]);

	let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
	connect_block(&nodes[1], &block);
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);

	// Duplicate the connect_block call since this may happen due to other listeners
	// registering new transactions
	connect_block(&nodes[1], &block);
}

#[test]
fn test_force_close_fail_back() {
	// Check which HTLCs are failed-backwards on channel force-closure
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);

	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);

	let mut payment_event = {
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	payment_event = SendEvent::from_event(events_2.remove(0));
	assert_eq!(payment_event.msgs.len(), 1);

	check_added_monitors!(nodes[1], 1);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[2], 1);
	let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	// nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
	// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
	// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).

	nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_closed_broadcast!(nodes[2], true);
	check_added_monitors!(nodes[2], 1);
	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
	let commitment_tx = {
		let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
		// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
		// have a use for it unless nodes[2] learns the preimage somehow; the funds will go
		// back to nodes[1] upon timeout otherwise.
		assert_eq!(node_txn.len(), 1);
		node_txn.remove(0)
	};

	mine_transaction(&nodes[1], &commitment_tx);

	// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);

	// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
	{
		get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
			.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
	}
	mine_transaction(&nodes[2], &commitment_tx);
	let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
	assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
	let htlc_tx = node_txn.pop().unwrap();
	assert_eq!(htlc_tx.input.len(), 1);
	assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
	assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
	assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
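	// Per BOLT 3, an HTLC-Success spend of a received-HTLC output carries a five-element
	// witness, roughly:
	//   OP_0 <remote_htlc_sig> <local_htlc_sig> <payment_preimage> <witness_script>
	// whereas an HTLC-Timeout spend puts an empty vector where the preimage goes and, unlike
	// HTLC-Success, sets a non-zero lock_time; that is what the two asserts above distinguish.
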
	check_spends!(htlc_tx, commitment_tx);
}

#[test]
fn test_dup_events_on_peer_disconnect() {
	// Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
	// not generate a corresponding duplicative PaymentSent event. This was not always the case,
	// as we used to generate the event immediately upon receipt of the payment preimage in the
	// update_fulfill_htlc message.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	nodes[1].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
	check_added_monitors!(nodes[1], 1);
	let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
	expect_payment_sent(&nodes[0], payment_preimage, None, false, false);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
	reconnect_args.pending_htlc_claims.0 = 1;
	reconnect_nodes(reconnect_args);
	expect_payment_path_successful!(nodes[0]);
}

#[test]
fn test_peer_disconnected_before_funding_broadcasted() {
	// Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
	// before the funding transaction has been broadcasted and doesn't reconnect in time.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
	// broadcasted, even though it's created by `nodes[0]`.
	let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
	assert_eq!(temporary_channel_id, expected_temporary_channel_id);

	assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);

	// Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
	// never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
	// broadcasted.
	{
		assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
	}

	// The peers disconnect before the funding is broadcasted.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	// The time for peers to reconnect expires.
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
		nodes[0].node.timer_tick_occurred();
	}

	// Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
	// when the peers are disconnected and do not reconnect before the funding
	// transaction is broadcasted.
	check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true,
		[nodes[1].node.get_our_node_id()], 1000000);
	check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
		[nodes[0].node.get_our_node_id()], 1000000);
}

#[test]
fn test_simple_peer_disconnect() {
	// Test that we can reconnect when there are no lost messages
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
	reconnect_args.send_channel_ready = (true, true);
	reconnect_nodes(reconnect_args);

	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
	let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

	let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
	let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
	let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
	let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	claim_payment_along_route(
		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3)
	);
	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);

	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
	reconnect_args.pending_cell_htlc_fails.0 = 1;
	reconnect_args.pending_cell_htlc_claims.0 = 1;
	reconnect_nodes(reconnect_args);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 4);
	match events[0] {
		Event::PaymentSent { payment_preimage, payment_hash, .. } => {
			assert_eq!(payment_preimage, payment_preimage_3);
			assert_eq!(payment_hash, payment_hash_3);
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentPathSuccessful { .. } => {},
		_ => panic!("Unexpected event"),
	}
	match events[2] {
		Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
			assert_eq!(payment_hash, payment_hash_5);
			assert!(payment_failed_permanently);
		},
		_ => panic!("Unexpected event"),
	}
	match events[3] {
		Event::PaymentFailed { payment_hash, .. } => {
			assert_eq!(payment_hash, payment_hash_5);
		},
		_ => panic!("Unexpected event"),
	}

	check_added_monitors(&nodes[0], 1);

	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
}

fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
	// Test that we can reconnect when in-flight HTLC updates get dropped
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

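	// The `messages_delivered` parameter selects how far the BOLT 2 update dance for the first
	// payment gets before the disconnect. A rough map of the cases handled below:
	//   0: the channel_ready for the freshly-created channel was never delivered
	//   1: channel established, but update_add_htlc/commitment_signed are dropped
	//   2: update_add_htlc delivered, commitment_signed dropped
	//   3: + nodes[0]'s commitment_signed delivered (nodes[1]'s responses dropped)
	//   4: + nodes[1]'s revoke_and_ack delivered
	//   5: + nodes[1]'s commitment_signed delivered (nodes[0]'s final RAA dropped)
	//   6: the full dance completed before the disconnect
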
	let mut as_channel_ready = None;
	let channel_id = if messages_delivered == 0 {
		let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
		as_channel_ready = Some(channel_ready);
		// nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
		// Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
		// it before the channel_reestablish message.
		chan_id
	} else {
		create_announced_chan_between_nodes(&nodes, 0, 1).2
	};

	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);

	let payment_event = {
		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);

	if messages_delivered < 2 {
		// Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
	} else {
		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
		if messages_delivered >= 3 {
			nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
			check_added_monitors!(nodes[1], 1);
			let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

			if messages_delivered >= 4 {
				nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(nodes[0], 1);

				if messages_delivered >= 5 {
					nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
					let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
					// No commitment_signed so get_event_msg's assert(len == 1) passes
					check_added_monitors!(nodes[0], 1);

					if messages_delivered >= 6 {
						nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
						assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
						check_added_monitors!(nodes[1], 1);
					}
				}
			}
		}
	}

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	if messages_delivered < 3 {
		if simulate_broken_lnd {
			// lnd has a long-standing bug where they send a channel_ready prior to a
			// channel_reestablish if you reconnect prior to channel_ready time.
			//
			// Here we simulate that behavior, delivering a channel_ready immediately on
			// reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
			// in `reconnect_nodes` but we currently don't fail based on that.
			//
			// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
			nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
		}
		// Even if the channel_ready messages get exchanged, as long as nothing further was
		// received on either side, both sides will need to resend them.
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.send_channel_ready = (true, true);
		reconnect_args.pending_htlc_adds.1 = 1;
		reconnect_nodes(reconnect_args);
	} else if messages_delivered == 3 {
		// nodes[0] still wants its RAA + commitment_signed
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.pending_responding_commitment_signed.0 = true;
		reconnect_args.pending_raa.0 = true;
		reconnect_nodes(reconnect_args);
	} else if messages_delivered == 4 {
		// nodes[0] still wants its commitment_signed
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.pending_responding_commitment_signed.0 = true;
		reconnect_nodes(reconnect_args);
	} else if messages_delivered == 5 {
		// nodes[1] still wants its final RAA
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.pending_raa.1 = true;
		reconnect_nodes(reconnect_args);
	} else if messages_delivered == 6 {
		// Everything was delivered...
		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
	}

	let events_1 = nodes[1].node.get_and_clear_pending_events();
	if messages_delivered == 0 {
		assert_eq!(events_1.len(), 2);
		match events_1[0] {
			Event::ChannelReady { .. } => { },
			_ => panic!("Unexpected event"),
		}
		match events_1[1] {
			Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event"),
		}
	} else {
		assert_eq!(events_1.len(), 1);
		match events_1[0] {
			Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event"),
		}
	}

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

	nodes[1].node.process_pending_htlc_forwards();

	let events_2 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_1, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_1, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);

	let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_3.len(), 1);
	let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fail_htlcs.is_empty());
			assert_eq!(updates.update_fulfill_htlcs.len(), 1);
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};

	if messages_delivered >= 1 {
		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);

		let events_4 = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(events_4.len(), 1);
		match events_4[0] {
			Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
				assert_eq!(payment_preimage_1, *payment_preimage);
				assert_eq!(payment_hash_1, *payment_hash);
			},
			_ => panic!("Unexpected event"),
		}

		if messages_delivered >= 2 {
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
			check_added_monitors!(nodes[0], 1);
			let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

			if messages_delivered >= 3 {
				nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
				assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(nodes[1], 1);

				if messages_delivered >= 4 {
					nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
					let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
					// No commitment_signed so get_event_msg's assert(len == 1) passes
					check_added_monitors!(nodes[1], 1);

					if messages_delivered >= 5 {
						nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
						assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
						check_added_monitors!(nodes[0], 1);
					}
				}
			}
		}
	}

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	if messages_delivered < 2 {
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.pending_htlc_claims.0 = 1;
		reconnect_nodes(reconnect_args);
		if messages_delivered < 1 {
			expect_payment_sent!(nodes[0], payment_preimage_1);
		} else {
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
		}
	} else if messages_delivered == 2 {
		// nodes[0] still wants its RAA + commitment_signed
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.pending_responding_commitment_signed.1 = true;
		reconnect_args.pending_raa.1 = true;
		reconnect_nodes(reconnect_args);
	} else if messages_delivered == 3 {
		// nodes[0] still wants its commitment_signed
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.pending_responding_commitment_signed.1 = true;
		reconnect_nodes(reconnect_args);
	} else if messages_delivered == 4 {
		// nodes[1] still wants its final RAA
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.pending_raa.0 = true;
		reconnect_nodes(reconnect_args);
	} else if messages_delivered == 5 {
		// Everything was delivered...
		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
	}

	if messages_delivered == 1 || messages_delivered == 2 {
		expect_payment_path_successful!(nodes[0]);
	}
	if messages_delivered <= 5 {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	}
	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

	if messages_delivered > 2 {
		expect_payment_path_successful!(nodes[0]);
	}

	// Channel should still work fine...
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_drop_messages_peer_disconnect_a() {
	do_test_drop_messages_peer_disconnect(0, true);
	do_test_drop_messages_peer_disconnect(0, false);
	do_test_drop_messages_peer_disconnect(1, false);
	do_test_drop_messages_peer_disconnect(2, false);
}

#[test]
fn test_drop_messages_peer_disconnect_b() {
	do_test_drop_messages_peer_disconnect(3, false);
	do_test_drop_messages_peer_disconnect(4, false);
	do_test_drop_messages_peer_disconnect(5, false);
	do_test_drop_messages_peer_disconnect(6, false);
}

#[test]
fn test_channel_ready_without_best_block_updated() {
	// Previously, if we were offline when a funding transaction was locked in, and then we came
	// back online, calling best_block_updated once followed by transactions_confirmed, we'd not
	// generate a channel_ready until a later best_block_updated. This tests that we generate the
	// channel_ready immediately instead.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;

	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);

	let conf_height = nodes[0].best_block_info().1 + 1;
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	let block_txn = [funding_tx];
	let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
	let conf_block_header = nodes[0].get_block_header(conf_height);
	nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
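	// Note: `transactions_confirmed` takes each transaction paired with its index within the
	// confirming block (LDK's `TransactionData` slice type), which is why we `enumerate()`
	// above.
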
	// Ensure nodes[0] generates a channel_ready after the transactions_confirmed
	let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
}

#[test]
fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Let channel_manager get ahead of chain_monitor by 1 block.
	// This emulates a race condition in which a newly added channel_monitor skips processing
	// one block because the client calls block_connected on the channel_manager first and only
	// then on the chain_monitor.
	let height_1 = nodes[0].best_block_info().1 + 1;
	let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());

	nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
	nodes[0].node.block_connected(&block_1, height_1);

	// Create channel, and it gets added to chain_monitor in funding_created.
	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);

	// The newly added channel_monitor in chain_monitor hasn't processed block_1, but its
	// best_block is block_1, since that was populated by channel_manager, and channel_manager
	// was running ahead of chain_monitor at the time of funding_created.
	// Later on, subsequent blocks are connected to both channel_manager and chain_monitor, so
	// this channel's channel_monitor skips block_1 and directly processes the subsequent blocks.
	confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);

	// Ensure nodes[0] generates a channel_ready after the transactions_confirmed
	let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
}

#[test]
fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Let chain_monitor get ahead of channel_manager by 1 block.
	// This emulates a race condition in which a newly added channel_monitor skips processing
	// one block because the client calls block_connected on the chain_monitor first and only
	// then on the channel_manager.
	let height_1 = nodes[0].best_block_info().1 + 1;
	let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());

	nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
	nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);

	// Create channel, and it gets added to chain_monitor in funding_created.
	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);

	// channel_manager can't really skip block_1, it should get it eventually.
	nodes[0].node.block_connected(&block_1, height_1);

	// The newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block
	// is the block before block_1, since that was populated by channel_manager, and
	// channel_manager was running behind at the time of funding_created.
	// Later on, subsequent blocks are connected to both channel_manager and chain_monitor, so
	// this channel's channel_monitor skips block_1 and directly processes the subsequent blocks.
	confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);

	// Ensure nodes[0] generates a channel_ready after the transactions_confirmed
	let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
}

#[test]
fn test_drop_messages_peer_disconnect_dual_htlc() {
	// Test that we can handle reconnecting when both sides of a channel have pending
	// commitment_updates when we disconnect.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// Now try to send a second payment which will fail to send
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, payment_hash_2,
		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 1);
	match events_1[0] {
		MessageSendEvent::UpdateHTLCs { .. } => {},
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.claim_funds(payment_preimage_1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
	check_added_monitors!(nodes[1], 1);

	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());

			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
			let events_3 = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events_3.len(), 1);
			match events_3[0] {
				Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
					assert_eq!(*payment_preimage, payment_preimage_1);
					assert_eq!(*payment_hash, payment_hash_1);
				},
				_ => panic!("Unexpected event"),
			}

			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
			let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			// No commitment_signed so get_event_msg's assert(len == 1) passes
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
	assert_eq!(reestablish_1.len(), 1);
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
	assert_eq!(reestablish_2.len(), 1);

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
	let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

	assert!(as_resp.0.is_none());
	assert!(bs_resp.0.is_none());

	assert!(bs_resp.1.is_none());
	assert!(bs_resp.2.is_none());

	assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);

	assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
	assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
	assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
	assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
	assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
	let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
	let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_commitment_signed.update_add_htlcs.is_empty());
	assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
	assert!(as_commitment_signed.update_fail_htlcs.is_empty());
	assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
	assert!(as_commitment_signed.update_fee.is_none());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_5 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 1);
	match events_5[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
			assert_eq!(payment_hash_2, *payment_hash);
			match &purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	expect_payment_path_successful!(nodes[0]);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

fn do_test_htlc_timeout(send_partial_mpp: bool) {
	// If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
	// to avoid our counterparty failing the channel.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1);

	let our_payment_hash = if send_partial_mpp {
		let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
		// Use the utility function send_payment_along_path to send the payment with MPP data which
		// indicates there are more HTLCs coming.
		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
		let payment_id = PaymentId([42; 32]);
		let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
		nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
			&None, session_privs[0]).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		// Now do the relevant commitment_signed/RAA dances along the path, noting that the final
		// hop should *not* yet generate any PaymentClaimable event(s).
		pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
		our_payment_hash
	} else {
		route_payment(&nodes[0], &[&nodes[1]], 100000).1
	};

	let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
	connect_block(&nodes[0], &block);
	connect_block(&nodes[1], &block);
	let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
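	// Roughly, LDK fails an unclaimed inbound HTLC back once the chain gets within
	// HTLC_FAIL_BACK_BUFFER (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) blocks of its
	// CLTV expiry, so the loop below connects blocks right up to that trigger height.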
	for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
		block.header.prev_blockhash = block.block_hash();
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);

	check_added_monitors!(nodes[1], 1);
	let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
	assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
	assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
	assert!(htlc_timeout_updates.update_fee.is_none());

	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
	// 100_000 msat as u64, followed by the height at which we failed back above
	let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
	expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
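	// 0x4000 | 15 is PERM|15, BOLT 4's incorrect_or_unknown_payment_details failure code; its
	// failure data is the HTLC amount (u64, big-endian) followed by the failing block height
	// (u32), matching the buffer built above.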
	expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
}

#[test]
fn test_htlc_timeout() {
	do_test_htlc_timeout(true);
	do_test_htlc_timeout(false);
}

fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
	// Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
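	// (The "holding cell" is the queue of updates we cannot yet send to our peer, e.g. because
	// we are still waiting on their revoke_and_ack for the previous commitment transaction.)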
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

4421 // Make sure all nodes are at the same starting height
4422 connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4423 connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4424 connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4426 // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4427 let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4428 nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4429 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4430 assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4431 check_added_monitors!(nodes[1], 1);
4433 // Now attempt to route a second payment, which should be placed in the holding cell
4434 let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4435 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4436 sending_node.node.send_payment_with_route(&route, second_payment_hash,
4437 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4439 check_added_monitors!(nodes[0], 1);
4440 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4441 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4442 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4443 expect_pending_htlcs_forwardable!(nodes[1]);
4445 check_added_monitors!(nodes[1], 0);
4447 connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4448 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4449 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
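	// One more block pushes the holding-cell HTLC past its add deadline, at which point
	// nodes[1] should give up on it and fail it backwards.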
	connect_blocks(&nodes[1], 1);

	if forwarded_htlc {
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
		check_added_monitors!(nodes[1], 1);
		let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(fail_commit.len(), 1);
		match fail_commit[0] {
			MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
				nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
				commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
			},
			_ => unreachable!(),
		}
		expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
	} else {
		expect_payment_failed!(nodes[1], second_payment_hash, false);
	}
}
#[test]
fn test_holding_cell_htlc_add_timeouts() {
	do_test_holding_cell_htlc_add_timeouts(false);
	do_test_holding_cell_htlc_add_timeouts(true);
}
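// Drains the SpendableOutputs events from a node's ChainMonitor and spends each descriptor to
// an OP_RETURN script at a feerate of 253 sat/kW, returning the resulting transactions (plus,
// when more than one descriptor was returned, a single transaction sweeping all of them at
// once) so tests can check exactly what the monitor said was claimable.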
macro_rules! check_spendable_outputs {
	($node: expr, $keysinterface: expr) => {
		{
			let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
			let mut txn = Vec::new();
			let mut all_outputs = Vec::new();
			let secp_ctx = Secp256k1::new();
			for event in events.drain(..) {
				match event {
					Event::SpendableOutputs { mut outputs, channel_id: _ } => {
						for outp in outputs.drain(..) {
							txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
							all_outputs.push(outp);
						}
					},
					_ => panic!("Unexpected event"),
				};
			}
			if all_outputs.len() > 1 {
				if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
					txn.push(tx);
				}
			}
			txn
		}
	}
}
#[test]
fn test_claim_sizeable_push_msat() {
	// Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
	nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(node_txn.len(), 1);
	check_spends!(node_txn[0], chan.3);
	assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening

	mine_transaction(&nodes[1], &node_txn[0]);
	connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
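	// Our to_self output is CSV-locked for BREAKDOWN_TIMEOUT blocks, so the sweep only becomes
	// available now and must set its input's nSequence to that delay (checked below).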
	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	assert_eq!(spend_txn[0].input.len(), 1);
	check_spends!(spend_txn[0], node_txn[0]);
	assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
}
#[test]
fn test_claim_on_remote_sizeable_push_msat() {
	// Same test as the previous one, but on the remote commitment tx, as per_commitment_point
	// registration changes depending on whether you're the funder or fundee and the to_remote
	// output is encumbered by a P2WPKH
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
	nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);

	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(node_txn.len(), 1);
	check_spends!(node_txn[0], chan.3);
	assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening

	mine_transaction(&nodes[1], &node_txn[0]);
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
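	// The monitor only surfaces SpendableOutputs once the claim is ANTI_REORG_DELAY
	// confirmations deep, so a reorg can no longer invalidate it.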
	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	check_spends!(spend_txn[0], node_txn[0]);
}
#[test]
fn test_claim_on_remote_revoked_sizeable_push_msat() {
	// Same test as the previous one, but on the remote revoked commitment tx, as
	// per_commitment_point registration changes depending on whether you're the funder or fundee
	// and the to_remote output is encumbered by a P2WPKH

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());

	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
	mine_transaction(&nodes[1], &revoked_local_txn[0]);
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);

	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	mine_transaction(&nodes[1], &node_txn[0]);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 3);
	check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
	check_spends!(spend_txn[1], node_txn[0]);
	check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
}
#[test]
fn test_static_spendable_outputs_preimage_tx() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);

	let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
	assert_eq!(commitment_tx[0].input.len(), 1);
	assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());

	// Settle A's commitment tx on B's chain
	nodes[1].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
	check_added_monitors!(nodes[1], 1);
	mine_transaction(&nodes[1], &commitment_tx[0]);
	check_added_monitors!(nodes[1], 1);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	match events[0] {
		MessageSendEvent::UpdateHTLCs { .. } => {},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	}

	// Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
	assert_eq!(node_txn.len(), 1);
	check_spends!(node_txn[0], commitment_tx[0]);
	assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);

	mine_transaction(&nodes[1], &node_txn[0]);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	check_spends!(spend_txn[0], node_txn[0]);
}
#[test]
fn test_static_spendable_outputs_timeout_tx() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Rebalance the network a bit by relaying one payment through all the channels ...
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);

	let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
	assert_eq!(commitment_tx[0].input.len(), 1);
	assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());

	// Settle A's commitment tx on B's chain
	mine_transaction(&nodes[1], &commitment_tx[0]);
	check_added_monitors!(nodes[1], 1);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	match events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	}
	connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires

	// Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
	check_spends!(node_txn[0], commitment_tx[0].clone());
	assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);

	mine_transaction(&nodes[1], &node_txn[0]);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
	expect_payment_failed!(nodes[1], our_payment_hash, false);

	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 3); // SpendableOutputs: to_remote on the remote commitment tx, the timeout tx output, and one tx sweeping both
	check_spends!(spend_txn[0], commitment_tx[0]);
	check_spends!(spend_txn[1], node_txn[0]);
	check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
}
#[test]
fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());

	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);

	mine_transaction(&nodes[1], &revoked_local_txn[0]);
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);

	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(node_txn.len(), 1);
	assert_eq!(node_txn[0].input.len(), 2);
	check_spends!(node_txn[0], revoked_local_txn[0]);
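	// The justice tx claims both the revoked to_local output and the HTLC output in a single
	// transaction, hence the two inputs checked above.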
	mine_transaction(&nodes[1], &node_txn[0]);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	check_spends!(spend_txn[0], node_txn[0]);
}
#[test]
fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());

	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);

	// A will generate HTLC-Timeout from revoked commitment tx
	mine_transaction(&nodes[0], &revoked_local_txn[0]);
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires

	let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(revoked_htlc_txn.len(), 1);
	assert_eq!(revoked_htlc_txn[0].input.len(), 1);
	assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
	check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
	assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout
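	// HTLC-Timeout claims set nLockTime to the HTLC's cltv_expiry, while HTLC-Success claims
	// can confirm immediately, so a nonzero locktime identifies the timeout tx here.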
	// B will generate justice tx from A's revoked commitment/HTLC tx
	connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);

	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
	// The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
	// including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
	// transactions next...
	assert_eq!(node_txn[0].input.len(), 3);
	check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);

	assert_eq!(node_txn[1].input.len(), 2);
	check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
	if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
		assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
	} else {
		assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
		assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
	}

	mine_transaction(&nodes[1], &node_txn[1]);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	// Check B's ChannelMonitor was able to generate the right spendable output descriptor
	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	assert_eq!(spend_txn[0].input.len(), 1);
	check_spends!(spend_txn[0], node_txn[1]);
}
#[test]
fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());

	// The to-be-revoked commitment tx should have one HTLC and one to_remote output
	assert_eq!(revoked_local_txn[0].output.len(), 2);

	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);

	// B will generate HTLC-Success from revoked commitment tx
	mine_transaction(&nodes[1], &revoked_local_txn[0]);
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();

	assert_eq!(revoked_htlc_txn.len(), 1);
	assert_eq!(revoked_htlc_txn[0].input.len(), 1);
	assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
	check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);

	// Check that the unspent (of two) outputs on revoked_local_txn[0] is a P2WPKH:
	let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
	assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
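	// With exactly two outputs on the commitment tx, XOR-ing the spent vout with 1 yields the
	// index of the other, still-unspent output.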
	// A will generate justice tx from B's revoked commitment/HTLC tx
	connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);

	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success

	// The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
	// including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
	// transactions next...
	assert_eq!(node_txn[0].input.len(), 2);
	check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
	if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
		assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
	} else {
		assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
		assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
	}

	assert_eq!(node_txn[1].input.len(), 1);
	check_spends!(node_txn[1], revoked_htlc_txn[0]);

	mine_transaction(&nodes[0], &node_txn[1]);
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);

	// nodes[0]'s ChannelMonitor shouldn't have tried to generate any new transactions by this
	// point - node_txn above captured everything it broadcast.

	// Check A's ChannelMonitor was able to generate the right spendable output descriptor
	let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
	assert_eq!(spend_txn.len(), 3);
	assert_eq!(spend_txn[0].input.len(), 1);
	check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
	assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
	check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
	check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
}
#[test]
fn test_onchain_to_onchain_claim() {
	// Test that in case of channel closure, we detect the state of output and claim HTLC
	// on downstream peer's remote commitment tx.
	// First, have C claim an HTLC against its own latest commitment transaction.
	// Then, broadcast these to B, which should update the monitor downstream on the A<->B
	// channel.
	// Finally, check that B will claim the HTLC output if A's latest commitment transaction
	// is broadcast.

	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Ensure all nodes are at the same height
	let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
	connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
	connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
	connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);

	// Rebalance the network a bit by relaying one payment through all the channels ...
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
	let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
	check_spends!(commitment_tx[0], chan_2.3);
	nodes[2].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
	check_added_monitors!(nodes[2], 1);
	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());

	mine_transaction(&nodes[2], &commitment_tx[0]);
	check_closed_broadcast!(nodes[2], true);
	check_added_monitors!(nodes[2], 1);
	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);

	let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
	assert_eq!(c_txn.len(), 1);
	check_spends!(c_txn[0], commitment_tx[0]);
	assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
	assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
	assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx

	// Now that we've broadcast C's commitment tx and HTLC-Success on B's chain, B should be
	// able to extract the preimage and update the downstream monitor
	connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
	check_added_monitors!(nodes[1], 1);
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
			next_channel_id, outbound_amount_forwarded_msat, ..
		} => {
			assert_eq!(total_fee_earned_msat, Some(1000));
			assert_eq!(prev_channel_id, Some(chan_1.2));
			assert_eq!(claim_from_onchain_tx, true);
			assert_eq!(next_channel_id, Some(chan_2.2));
			assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
		},
		_ => panic!("Unexpected event"),
	}
	check_added_monitors!(nodes[1], 1);
	let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
	let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
	let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);

	match nodes_2_event {
		MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
		_ => panic!("Unexpected event"),
	}

	match nodes_0_event {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
			assert!(update_add_htlcs.is_empty());
			assert!(update_fail_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_malformed_htlcs.is_empty());
			assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
		},
		_ => panic!("Unexpected event"),
	}

	// Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
	match msg_events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	}

	// Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
	let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
	mine_transaction(&nodes[1], &commitment_tx[0]);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	// ChannelMonitor: HTLC-Success tx
	assert_eq!(b_txn.len(), 1);
	check_spends!(b_txn[0], commitment_tx[0]);
	assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
	assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
	assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
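	// The monitor sets this claim's nLockTime to the current best height (discouraging
	// fee-sniping), hence the comparison against best_block_info().1 above.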
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
}
#[test]
fn test_duplicate_payment_hash_one_failure_one_success() {
	// Topology : A --> B --> C --> D
	// We route 2 payments with the same hash between B and C; one will time out, the other
	// will be successfully claimed.
	// Note that because C will refuse to generate two payment secrets for the same payment hash,
	// we forward one of the payments onwards to D.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	// When this test was written, the default base fee floated based on the HTLC count.
	// It is now fixed, so we simply set the fee to the expected value here.
	let mut config = test_default_channel_config();
	config.channel_config.forwarding_fee_base_msat = 196;
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
		&[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	create_announced_chan_between_nodes(&nodes, 2, 3);

	let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
	connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
	connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
	connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
	connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);

	let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);

	let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
	// We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
	// script push size limit so that the below script length checks match
	// ACCEPTED_HTLC_SCRIPT_WEIGHT.
	let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
		.with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);

	let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
	assert_eq!(commitment_txn[0].input.len(), 1);
	check_spends!(commitment_txn[0], chan_2.3);

	mine_transaction(&nodes[1], &commitment_txn[0]);
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
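	// The HTLC B offered to C for the payment to D expires (TEST_FINAL_CLTV - 40) blocks (the
	// final CLTV) plus C's MIN_CLTV_EXPIRY_DELTA for the C <-> D hop after it was added, so
	// this lands us at its expiry.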
	let htlc_timeout_tx;
	{ // Extract one of the two HTLC-Timeout transactions
		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		// ChannelMonitor: timeout tx * 2-or-3
		assert!(node_txn.len() == 2 || node_txn.len() == 3);

		check_spends!(node_txn[0], commitment_txn[0]);
		assert_eq!(node_txn[0].input.len(), 1);
		assert_eq!(node_txn[0].output.len(), 1);

		if node_txn.len() > 2 {
			check_spends!(node_txn[1], commitment_txn[0]);
			assert_eq!(node_txn[1].input.len(), 1);
			assert_eq!(node_txn[1].output.len(), 1);
			assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);

			check_spends!(node_txn[2], commitment_txn[0]);
			assert_eq!(node_txn[2].input.len(), 1);
			assert_eq!(node_txn[2].output.len(), 1);
			assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
		} else {
			check_spends!(node_txn[1], commitment_txn[0]);
			assert_eq!(node_txn[1].input.len(), 1);
			assert_eq!(node_txn[1].output.len(), 1);
			assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
		}

		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
		assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
		// Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
		// (with value 900 sats) will be claimed in the below `claim_funds` call.
		if node_txn.len() > 2 {
			assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
			htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
		} else {
			htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
		}
	}

	nodes[2].node.claim_funds(our_payment_preimage);
	expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);

	mine_transaction(&nodes[2], &commitment_txn[0]);
	check_added_monitors!(nodes[2], 2);
	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
	let events = nodes[2].node.get_and_clear_pending_msg_events();
	match events[0] {
		MessageSendEvent::UpdateHTLCs { .. } => {},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	}

	let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
	check_spends!(htlc_success_txn[0], commitment_txn[0]);
	check_spends!(htlc_success_txn[1], commitment_txn[0]);
	assert_eq!(htlc_success_txn[0].input.len(), 1);
	assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
	assert_eq!(htlc_success_txn[1].input.len(), 1);
	assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
	assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
	assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);

	mine_transaction(&nodes[1], &htlc_timeout_tx);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(htlc_updates.update_add_htlcs.is_empty());
	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
	let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
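	// Both HTLCs share the same payment hash, so the htlc_id recorded here is the only way to
	// tell the HTLC failed here apart from the one fulfilled below.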
	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);

	expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);

	// Resolve the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
	mine_transaction(&nodes[1], &htlc_success_txn[1]);
	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
	expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
}
#[test]
fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
	let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
	assert_eq!(local_txn.len(), 1);
	assert_eq!(local_txn[0].input.len(), 1);
	check_spends!(local_txn[0], chan_1.3);

	// Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
	nodes[1].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
	check_added_monitors!(nodes[1], 1);

	mine_transaction(&nodes[1], &local_txn[0]);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	match events[0] {
		MessageSendEvent::UpdateHTLCs { .. } => {},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	}

	let node_tx = {
		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		assert_eq!(node_txn[0].input.len(), 1);
		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
		check_spends!(node_txn[0], local_txn[0]);
		node_txn[0].clone()
	};

	mine_transaction(&nodes[1], &node_tx);
	connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);

	// Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	assert_eq!(spend_txn[0].input.len(), 1);
	check_spends!(spend_txn[0], node_tx);
	assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
}
fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
	// Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
	// unrevoked commitment transaction.
	// This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
	// a remote RAA before they could be failed backwards (and combinations thereof).
	// We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
	// use the same payment hashes.
	// Thus, we use a six-node network:
	//
	// A        E
	//  \      /
	//   C -> D
	//  /      \
	// B        F
	//
	// And test where C fails back to A/B when D announces its latest commitment transaction
	let chanmon_cfgs = create_chanmon_cfgs(6);
	let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
	// When this test was written, the default base fee floated based on the HTLC count.
	// It is now fixed, so we simply set the fee to the expected value here.
	let mut config = test_default_channel_config();
	config.channel_config.forwarding_fee_base_msat = 196;
	let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
		&[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
	let nodes = create_network(6, &node_cfgs, &node_chanmgrs);

	let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
	let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
	let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
	let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5);

	// Rebalance and check output sanity...
	send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
	send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
	assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);

	let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
		.unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
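	// HTLCs below the holder's dust limit (plus HTLC-tx fee) never appear as outputs on the
	// commitment transaction, so they can only ever be failed back, never claimed on-chain; we
	// exercise both kinds below.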
	// 0th HTLC:
	let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
	// 1st HTLC:
	let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
	// 2nd HTLC:
	send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
	// 3rd HTLC:
	send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
	// 4th HTLC:
	let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
	// 5th HTLC:
	let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
	// 6th HTLC:
	send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
	// 7th HTLC:
	send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());

	// 8th HTLC:
	let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
	// 9th HTLC:
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
	send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee

	// 10th HTLC:
	let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
	// 11th HTLC:
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
	send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());

	// Double-check that six of the new HTLCs were added
	// We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
	// with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
	assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
	assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);

	// Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
	// Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
	nodes[4].node.fail_htlc_backwards(&payment_hash_1);
	nodes[4].node.fail_htlc_backwards(&payment_hash_3);
	nodes[4].node.fail_htlc_backwards(&payment_hash_5);
	nodes[4].node.fail_htlc_backwards(&payment_hash_6);
	check_added_monitors!(nodes[4], 0);

	let failed_destinations = vec![
		HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
		HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
		HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
		HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
	];
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
	check_added_monitors!(nodes[4], 1);

	let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
	nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
	nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
	nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
	nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
	commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);

	// Fail 3rd below-dust and 7th above-dust HTLCs
	nodes[5].node.fail_htlc_backwards(&payment_hash_2);
	nodes[5].node.fail_htlc_backwards(&payment_hash_4);
	check_added_monitors!(nodes[5], 0);

	let failed_destinations_2 = vec![
		HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
		HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
	];
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
	check_added_monitors!(nodes[5], 1);

	let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
	nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
	nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
	commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);

	let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);

	// After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events
	let failed_destinations_3 = vec![
		HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
		HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
		HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
		HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
		HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
		HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
	];
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
	check_added_monitors!(nodes[3], 1);
	let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
	if deliver_last_raa {
		commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
	} else {
		let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
	}
	// D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
	// below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
	// 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
	// propagated back to A/B yet (and D has two unrevoked commitment transactions).
	//
	// We now broadcast the latest commitment transaction, which *should* result in failures for
	// the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
	// the non-broadcast above-dust HTLCs.
	//
	// Alternatively, we may broadcast the previous commitment transaction, which should only
	// result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
	let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);

	if announce_latest {
		mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
	} else {
		mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
	}
	let events = nodes[2].node.get_and_clear_pending_events();
	let close_event = if deliver_last_raa {
		assert_eq!(events.len(), 2 + 6);
		events.last().clone().unwrap()
	} else {
		assert_eq!(events.len(), 1);
		events.last().clone().unwrap()
	};
	match close_event {
		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
		_ => panic!("Unexpected event"),
	}

	connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
	check_closed_broadcast!(nodes[2], true);
	if deliver_last_raa {
		expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);

		let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
		expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
	} else {
		let expected_destinations: Vec<HTLCDestination> = if announce_latest {
			repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
		} else {
			repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
		};

		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
	}
	check_added_monitors!(nodes[2], 3);

	let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(cs_msgs.len(), 2);
	let mut a_done = false;
	for msg in cs_msgs {
		match msg {
			MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
				// Both under-dust HTLCs and the one above-dust HTLC that we had already failed
				// should be failed-backwards here.
				let target = if *node_id == nodes[0].node.get_our_node_id() {
					// If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
					for htlc in &updates.update_fail_htlcs {
						assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
					}
					assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
					assert!(!a_done);
					a_done = true;
					&nodes[0]
				} else {
					// If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
					for htlc in &updates.update_fail_htlcs {
						assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
					}
					assert_eq!(*node_id, nodes[1].node.get_our_node_id());
					assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
					&nodes[1]
				};
				target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
				target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
				target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
				if announce_latest {
					target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
					if *node_id == nodes[0].node.get_our_node_id() {
						target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
					}
				}
				commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
			},
			_ => panic!("Unexpected event"),
		}
	}

	let as_events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
	let mut as_faileds = new_hash_set();
	let mut as_updates = 0;
	for event in as_events.iter() {
		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
			assert!(as_faileds.insert(*payment_hash));
			if *payment_hash != payment_hash_2 {
				assert_eq!(*payment_failed_permanently, deliver_last_raa);
			} else {
				assert!(!payment_failed_permanently);
			}
			if let PathFailure::OnPath { network_update: Some(_) } = failure {
				as_updates += 1;
			}
		} else if let &Event::PaymentFailed { .. } = event {
		} else { panic!("Unexpected event"); }
	}
	assert!(as_faileds.contains(&payment_hash_1));
	assert!(as_faileds.contains(&payment_hash_2));
	if announce_latest {
		assert!(as_faileds.contains(&payment_hash_3));
		assert!(as_faileds.contains(&payment_hash_5));
	}
	assert!(as_faileds.contains(&payment_hash_6));

	let bs_events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
	let mut bs_faileds = new_hash_set();
	let mut bs_updates = 0;
	for event in bs_events.iter() {
		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
			assert!(bs_faileds.insert(*payment_hash));
			if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
				assert_eq!(*payment_failed_permanently, deliver_last_raa);
			} else {
				assert!(!payment_failed_permanently);
			}
			if let PathFailure::OnPath { network_update: Some(_) } = failure {
				bs_updates += 1;
			}
		} else if let &Event::PaymentFailed { .. } = event {
		} else { panic!("Unexpected event"); }
	}
	assert!(bs_faileds.contains(&payment_hash_1));
	assert!(bs_faileds.contains(&payment_hash_2));
	if announce_latest {
		assert!(bs_faileds.contains(&payment_hash_4));
	}
	assert!(bs_faileds.contains(&payment_hash_5));

	// For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
	// get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
	// unknown-preimage-etc, B should have gotten 2. Thus, in the
	// announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
	assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
	assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
}
#[test]
fn test_fail_backwards_latest_remote_announce_a() {
	do_test_fail_backwards_unrevoked_remote_announce(false, true);
}

#[test]
fn test_fail_backwards_latest_remote_announce_b() {
	do_test_fail_backwards_unrevoked_remote_announce(true, true);
}

#[test]
fn test_fail_backwards_previous_remote_announce() {
	do_test_fail_backwards_unrevoked_remote_announce(false, false);
	// Note that true, false doesn't make sense as it implies we announce a revoked state, which is
	// tested for in test_commitment_revoked_fail_backward_exhaustive()
}
5491 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5492 let chanmon_cfgs = create_chanmon_cfgs(2);
5493 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5494 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5495 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5497 // Create some initial channels
5498 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5500 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5501 let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5502 assert_eq!(local_txn[0].input.len(), 1);
5503 check_spends!(local_txn[0], chan_1.3);
5505 // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
5506 mine_transaction(&nodes[0], &local_txn[0]);
5507 check_closed_broadcast!(nodes[0], true);
5508 check_added_monitors!(nodes[0], 1);
5509 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5510 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5512 let htlc_timeout = {
5513 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5514 assert_eq!(node_txn.len(), 1);
5515 assert_eq!(node_txn[0].input.len(), 1);
5516 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5517 check_spends!(node_txn[0], local_txn[0]);
5518 node_txn[0].clone()
5519 };
5521 mine_transaction(&nodes[0], &htlc_timeout);
5522 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5523 expect_payment_failed!(nodes[0], our_payment_hash, false);
5525 // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
5526 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5527 assert_eq!(spend_txn.len(), 3);
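// The three claims correspond to: the to_self output on the broadcast commitment transaction,
// the HTLC-Timeout transaction's output once its own to_self_delay (BREAKDOWN_TIMEOUT) matures,
// and an aggregated transaction sweeping both outputs at once.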
5528 check_spends!(spend_txn[0], local_txn[0]);
5529 assert_eq!(spend_txn[1].input.len(), 1);
5530 check_spends!(spend_txn[1], htlc_timeout);
5531 assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5532 assert_eq!(spend_txn[2].input.len(), 2);
5533 check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5534 assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5535 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5536 }
5538 #[test]
5539 fn test_key_derivation_params() {
5540 // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5541 // manager rotation to test that `channel_keys_id` returned in
5542 // [`SpendableOutputDescriptor::DelayedPaymentOutput`] let us re-derive the channel key set to
5543 // then derive a `delayed_payment_key`.
5545 let chanmon_cfgs = create_chanmon_cfgs(3);
5547 // We manually create the node configuration to backup the seed.
5548 let seed = [42; 32];
5549 let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5550 let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5551 let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5552 let scorer = RwLock::new(test_utils::TestScorer::new());
5553 let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
5554 let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
5555 let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5556 let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5557 node_cfgs.remove(0);
5558 node_cfgs.insert(0, node);
5560 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5561 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5563 // Create some initial channels
5564 // Create a dummy channel to advance index by one and thus test re-derivation correctness
5566 let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5567 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5568 assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5570 // Ensure all nodes are at the same height
5571 let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5572 connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5573 connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5574 connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5576 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5577 let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5578 let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5579 assert_eq!(local_txn_1[0].input.len(), 1);
5580 check_spends!(local_txn_1[0], chan_1.3);
5582 // We check that the funding pubkeys are unique
5583 let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5584 let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5585 if from_0_funding_key_0 == from_1_funding_key_0
5586 || from_0_funding_key_0 == from_1_funding_key_1
5587 || from_0_funding_key_1 == from_1_funding_key_0
5588 || from_0_funding_key_1 == from_1_funding_key_1 {
5589 panic!("Funding pubkeys aren't unique");
5590 }
5592 // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
5593 mine_transaction(&nodes[0], &local_txn_1[0]);
5594 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5595 check_closed_broadcast!(nodes[0], true);
5596 check_added_monitors!(nodes[0], 1);
5597 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5599 let htlc_timeout = {
5600 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5601 assert_eq!(node_txn.len(), 1);
5602 assert_eq!(node_txn[0].input.len(), 1);
5603 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5604 check_spends!(node_txn[0], local_txn_1[0]);
5605 node_txn[0].clone()
5606 };
5608 mine_transaction(&nodes[0], &htlc_timeout);
5609 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5610 expect_payment_failed!(nodes[0], our_payment_hash, false);
5612 // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
5613 let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5614 let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5615 assert_eq!(spend_txn.len(), 3);
5616 check_spends!(spend_txn[0], local_txn_1[0]);
5617 assert_eq!(spend_txn[1].input.len(), 1);
5618 check_spends!(spend_txn[1], htlc_timeout);
5619 assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5620 assert_eq!(spend_txn[2].input.len(), 2);
5621 check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5622 assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5623 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5624 }
5626 #[test]
5627 fn test_static_output_closing_tx() {
5628 let chanmon_cfgs = create_chanmon_cfgs(2);
5629 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5630 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5631 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5633 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5635 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5636 let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5638 mine_transaction(&nodes[0], &closing_tx);
5639 check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
5640 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5642 let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5643 assert_eq!(spend_txn.len(), 1);
5644 check_spends!(spend_txn[0], closing_tx);
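// Unlike the force-close cases elsewhere in this file, a cooperative close pays each party
// directly with no to_self_delay, so a single, immediately-spendable claim of the closing
// transaction's output is all we expect here.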
5646 mine_transaction(&nodes[1], &closing_tx);
5647 check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
5648 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5650 let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5651 assert_eq!(spend_txn.len(), 1);
5652 check_spends!(spend_txn[0], closing_tx);
5653 }
5655 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5656 let chanmon_cfgs = create_chanmon_cfgs(2);
5657 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5658 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5659 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5660 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5662 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5664 // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5665 // present in B's local commitment transaction, but none of A's commitment transactions.
5666 nodes[1].node.claim_funds(payment_preimage);
5667 check_added_monitors!(nodes[1], 1);
5668 expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5670 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5671 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5672 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
5674 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5675 check_added_monitors!(nodes[0], 1);
5676 let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5677 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5678 check_added_monitors!(nodes[1], 1);
5680 let starting_block = nodes[1].best_block_info();
5681 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5682 for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5683 connect_block(&nodes[1], &block);
5684 block.header.prev_blockhash = block.block_hash();
5685 }
5686 test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5687 check_closed_broadcast!(nodes[1], true);
5688 check_added_monitors!(nodes[1], 1);
5689 check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
5690 }
5692 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5693 let chanmon_cfgs = create_chanmon_cfgs(2);
5694 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5695 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5696 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5697 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5699 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5700 nodes[0].node.send_payment_with_route(&route, payment_hash,
5701 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5702 check_added_monitors!(nodes[0], 1);
5704 let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5706 // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5707 // transaction, however it is not in A's latest local commitment, so we can just broadcast that
5708 // to "time out" the HTLC.
5710 let starting_block = nodes[1].best_block_info();
5711 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5713 for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5714 connect_block(&nodes[0], &block);
5715 block.header.prev_blockhash = block.block_hash();
5716 }
5717 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5718 check_closed_broadcast!(nodes[0], true);
5719 check_added_monitors!(nodes[0], 1);
5720 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5721 }
5723 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5724 let chanmon_cfgs = create_chanmon_cfgs(3);
5725 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5726 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5727 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5728 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5730 // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5731 // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
5732 // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5733 // actually revoked.
5734 let htlc_value = if use_dust { 50000 } else { 3000000 };
5735 let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5736 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5737 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5738 check_added_monitors!(nodes[1], 1);
5740 let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5741 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5742 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5743 check_added_monitors!(nodes[0], 1);
5744 let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5745 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5746 check_added_monitors!(nodes[1], 1);
5747 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5748 check_added_monitors!(nodes[1], 1);
5749 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5751 if check_revoke_no_close {
5752 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5753 check_added_monitors!(nodes[0], 1);
5754 }
5756 let starting_block = nodes[1].best_block_info();
5757 let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5758 for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5759 connect_block(&nodes[0], &block);
5760 block.header.prev_blockhash = block.block_hash();
5761 }
5762 if !check_revoke_no_close {
5763 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5764 check_closed_broadcast!(nodes[0], true);
5765 check_added_monitors!(nodes[0], 1);
5766 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5767 } else {
5768 expect_payment_failed!(nodes[0], our_payment_hash, true);
5769 }
5770 }
5772 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5773 // There are only a few cases to test here:
5774 // * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5775 // broadcastable commitment transactions result in channel closure,
5776 // * it's included in an unrevoked-but-previous remote commitment transaction,
5777 // * it's included in the latest remote or local commitment transactions.
5778 // We test each of the three possible commitment transactions individually and use both dust and
5779 // non-dust HTLCs.
5780 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5781 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5782 // tested for at least one of the cases in other tests.
5783 #[test]
5784 fn htlc_claim_single_commitment_only_a() {
5785 do_htlc_claim_local_commitment_only(true);
5786 do_htlc_claim_local_commitment_only(false);
5788 do_htlc_claim_current_remote_commitment_only(true);
5789 do_htlc_claim_current_remote_commitment_only(false);
5790 }
5792 #[test]
5793 fn htlc_claim_single_commitment_only_b() {
5794 do_htlc_claim_previous_remote_commitment_only(true, false);
5795 do_htlc_claim_previous_remote_commitment_only(false, false);
5796 do_htlc_claim_previous_remote_commitment_only(true, true);
5797 do_htlc_claim_previous_remote_commitment_only(false, true);
5798 }
5800 #[test]
5801 #[should_panic]
5802 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
5803 let chanmon_cfgs = create_chanmon_cfgs(2);
5804 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5805 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5806 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5807 // Force duplicate randomness for every get-random call
5808 for node in nodes.iter() {
5809 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5810 }
5812 // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5813 let channel_value_satoshis=10000;
5814 let push_msat=10001;
5815 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5816 let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5817 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5818 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5820 // Create a second channel with the same random values. This used to panic due to a colliding
5821 // channel_id, but now panics due to a colliding outbound SCID alias.
5822 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5823 }
5825 #[test]
5826 fn bolt2_open_channel_sending_node_checks_part2() {
5827 let chanmon_cfgs = create_chanmon_cfgs(2);
5828 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5829 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5830 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5832 // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
5833 let channel_value_satoshis=2^24;
5834 let push_msat=10001;
5835 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
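// Note: `^` is XOR in Rust, so `2^24` above is 26 sats, not the 2^24 (i.e. `1 << 24`)
// funding cap the quoted requirement describes; create_channel still errors either way,
// as 26 sats is far below the minimum channel size we will open.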
5837 // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5838 let channel_value_satoshis=10000;
5839 // Test when push_msat is one more than 1000 * funding_satoshis.
5840 let push_msat=1000*channel_value_satoshis+1;
5841 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5843 // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5844 let channel_value_satoshis=10000;
5845 let push_msat=10001;
5846 assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
5847 let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5848 assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
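// (LDK selects the holder's reserve via get_holder_selected_channel_reserve_satoshis,
// roughly 1% of the channel value floored at the minimum dust limit, so this inequality
// should hold by construction.)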
5850 // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5851 // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two states: 0 or 1
5852 assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
5854 // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5855 assert!(BREAKDOWN_TIMEOUT>0);
5856 assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
5858 // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5859 let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
5860 assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
5862 // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5863 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
5864 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
5865 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
5866 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
5867 assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
5868 }
5870 #[test]
5871 fn bolt2_open_channel_sane_dust_limit() {
5872 let chanmon_cfgs = create_chanmon_cfgs(2);
5873 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5874 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5875 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5877 let channel_value_satoshis=1000000;
5878 let push_msat=10001;
5879 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5880 let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5881 node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
5882 node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5884 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5885 let events = nodes[1].node.get_and_clear_pending_msg_events();
5886 let err_msg = match events[0] {
5887 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5888 msg.clone()
5889 },
5890 _ => panic!("Unexpected event"),
5891 };
5892 assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5893 }
5895 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5896 // originated from our node, its failure is surfaced to the user. We trigger this failure to
5897 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
5898 // is no longer affordable once it's freed.
5899 #[test]
5900 fn test_fail_holding_cell_htlc_upon_free() {
5901 let chanmon_cfgs = create_chanmon_cfgs(2);
5902 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5903 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5904 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5905 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5907 // First nodes[0] generates an update_fee, setting the channel's
5908 // pending_update_fee.
5909 {
5910 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5911 *feerate_lock += 20;
5912 }
5913 nodes[0].node.timer_tick_occurred();
5914 check_added_monitors!(nodes[0], 1);
5916 let events = nodes[0].node.get_and_clear_pending_msg_events();
5917 assert_eq!(events.len(), 1);
5918 let (update_msg, commitment_signed) = match events[0] {
5919 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5920 (update_fee.as_ref(), commitment_signed)
5921 },
5922 _ => panic!("Unexpected event"),
5923 };
5925 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5927 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5928 let channel_reserve = chan_stat.channel_reserve_msat;
5929 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5930 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5932 // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
5933 let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
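// (For reference, commit_tx_fee_msat is roughly (commitment_tx_base_weight(features) +
// num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate / 1000, expressed in msat; the
// `1 + 1` counts the HTLC being added plus one reserved fee-spike slot, and the `2 *`
// applies the fee-spike buffer multiple.)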
5934 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5936 // Send a payment which passes reserve checks but gets stuck in the holding cell.
5937 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5938 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5939 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5940 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5942 // Flush the pending fee update.
5943 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5944 let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5945 check_added_monitors!(nodes[1], 1);
5946 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5947 check_added_monitors!(nodes[0], 1);
5949 // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5950 // HTLC, but now that the fee has been raised the payment will now fail, causing
5951 // us to surface its failure to the user.
5952 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5953 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5954 nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
5956 // Check that the payment failed to be sent out.
5957 let events = nodes[0].node.get_and_clear_pending_events();
5958 assert_eq!(events.len(), 2);
5959 match &events[0] {
5960 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5961 assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5962 assert_eq!(our_payment_hash.clone(), *payment_hash);
5963 assert_eq!(*payment_failed_permanently, false);
5964 assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
5965 },
5966 _ => panic!("Unexpected event"),
5967 }
5968 match &events[1] {
5969 &Event::PaymentFailed { ref payment_hash, .. } => {
5970 assert_eq!(our_payment_hash.clone(), *payment_hash);
5971 },
5972 _ => panic!("Unexpected event"),
5973 }
5974 }
5976 // Test that if multiple HTLCs are released from the holding cell and one is
5977 // valid but the other is no longer valid upon release, the valid HTLC can be
5978 // successfully completed while the other one fails as expected.
5979 #[test]
5980 fn test_free_and_fail_holding_cell_htlcs() {
5981 let chanmon_cfgs = create_chanmon_cfgs(2);
5982 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5983 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5984 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5985 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5987 // First nodes[0] generates an update_fee, setting the channel's
5988 // pending_update_fee.
5989 {
5990 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5991 *feerate_lock += 200;
5992 }
5993 nodes[0].node.timer_tick_occurred();
5994 check_added_monitors!(nodes[0], 1);
5996 let events = nodes[0].node.get_and_clear_pending_msg_events();
5997 assert_eq!(events.len(), 1);
5998 let (update_msg, commitment_signed) = match events[0] {
5999 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6000 (update_fee.as_ref(), commitment_signed)
6001 },
6002 _ => panic!("Unexpected event"),
6003 };
6005 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6007 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6008 let channel_reserve = chan_stat.channel_reserve_msat;
6009 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6010 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6012 // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
6013 let amt_1 = 3000000;
6014 let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
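// Here `2 + 1` budgets the commitment fee for both pending HTLCs plus the fee-spike slot,
// so both sends pass the initial reserve check; the subsequent fee increase is what makes
// the second HTLC unaffordable once it is freed from the holding cell.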
6015 let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
6016 let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
6018 // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6019 nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
6020 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
6021 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6022 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6023 let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
6024 nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
6025 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
6026 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6027 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6029 // Flush the pending fee update.
6030 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6031 let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6032 check_added_monitors!(nodes[1], 1);
6033 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6034 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6035 check_added_monitors!(nodes[0], 2);
6037 // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6038 // but now that the fee has been raised the second payment will now fail, causing us
6039 // to surface its failure to the user. The first payment should succeed.
6040 chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6041 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6042 nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
6044 // Check that the second payment failed to be sent out.
6045 let events = nodes[0].node.get_and_clear_pending_events();
6046 assert_eq!(events.len(), 2);
6047 match &events[0] {
6048 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
6049 assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
6050 assert_eq!(payment_hash_2.clone(), *payment_hash);
6051 assert_eq!(*payment_failed_permanently, false);
6052 assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
6053 },
6054 _ => panic!("Unexpected event"),
6055 }
6056 match &events[1] {
6057 &Event::PaymentFailed { ref payment_hash, .. } => {
6058 assert_eq!(payment_hash_2.clone(), *payment_hash);
6059 },
6060 _ => panic!("Unexpected event"),
6061 }
6063 // Complete the first payment and the RAA from the fee update.
6064 let (payment_event, send_raa_event) = {
6065 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6066 assert_eq!(msgs.len(), 2);
6067 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6068 };
6069 let raa = match send_raa_event {
6070 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6071 _ => panic!("Unexpected event"),
6072 };
6073 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6074 check_added_monitors!(nodes[1], 1);
6075 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6076 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6077 let events = nodes[1].node.get_and_clear_pending_events();
6078 assert_eq!(events.len(), 1);
6079 match events[0] {
6080 Event::PendingHTLCsForwardable { .. } => {},
6081 _ => panic!("Unexpected event"),
6082 }
6083 nodes[1].node.process_pending_htlc_forwards();
6084 let events = nodes[1].node.get_and_clear_pending_events();
6085 assert_eq!(events.len(), 1);
6086 match events[0] {
6087 Event::PaymentClaimable { .. } => {},
6088 _ => panic!("Unexpected event"),
6089 }
6090 nodes[1].node.claim_funds(payment_preimage_1);
6091 check_added_monitors!(nodes[1], 1);
6092 expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
6094 let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6095 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6096 commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6097 expect_payment_sent!(nodes[0], payment_preimage_1);
6098 }
6100 // Test that if we fail to forward an HTLC that is being freed from the holding cell that the
6101 // HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
6102 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
6103 // once it's freed.
6104 #[test]
6105 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6106 let chanmon_cfgs = create_chanmon_cfgs(3);
6107 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6108 // Avoid having to include routing fees in calculations
6109 let mut config = test_default_channel_config();
6110 config.channel_config.forwarding_fee_base_msat = 0;
6111 config.channel_config.forwarding_fee_proportional_millionths = 0;
6112 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6113 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6114 let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6115 let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6117 // First nodes[1] generates an update_fee, setting the channel's
6118 // pending_update_fee.
6119 {
6120 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6121 *feerate_lock += 20;
6122 }
6123 nodes[1].node.timer_tick_occurred();
6124 check_added_monitors!(nodes[1], 1);
6126 let events = nodes[1].node.get_and_clear_pending_msg_events();
6127 assert_eq!(events.len(), 1);
6128 let (update_msg, commitment_signed) = match events[0] {
6129 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6130 (update_fee.as_ref(), commitment_signed)
6131 },
6132 _ => panic!("Unexpected event"),
6133 };
6135 nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6137 let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6138 let channel_reserve = chan_stat.channel_reserve_msat;
6139 let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6140 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6142 // Send a payment which passes reserve checks but gets stuck in the holding cell.
6143 let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6144 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6145 let payment_event = {
6146 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6147 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6148 check_added_monitors!(nodes[0], 1);
6150 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6151 assert_eq!(events.len(), 1);
6153 SendEvent::from_event(events.remove(0))
6154 };
6155 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6156 check_added_monitors!(nodes[1], 0);
6157 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6158 expect_pending_htlcs_forwardable!(nodes[1]);
6160 chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6161 assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6163 // Flush the pending fee update.
6164 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6165 let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6166 check_added_monitors!(nodes[2], 1);
6167 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6168 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6169 check_added_monitors!(nodes[1], 2);
6171 // A final RAA message is generated to finalize the fee update.
6172 let events = nodes[1].node.get_and_clear_pending_msg_events();
6173 assert_eq!(events.len(), 1);
6175 let raa_msg = match &events[0] {
6176 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6177 msg.clone()
6178 },
6179 _ => panic!("Unexpected event"),
6180 };
6182 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6183 check_added_monitors!(nodes[2], 1);
6184 assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6186 // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6187 let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6188 assert_eq!(process_htlc_forwards_event.len(), 2);
6189 match &process_htlc_forwards_event[1] {
6190 &Event::PendingHTLCsForwardable { .. } => {},
6191 _ => panic!("Unexpected event"),
6192 }
6194 // In response, we call ChannelManager's process_pending_htlc_forwards
6195 nodes[1].node.process_pending_htlc_forwards();
6196 check_added_monitors!(nodes[1], 1);
6198 // This causes the HTLC to be failed backwards.
6199 let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6200 assert_eq!(fail_event.len(), 1);
6201 let (fail_msg, commitment_signed) = match &fail_event[0] {
6202 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6203 assert_eq!(updates.update_add_htlcs.len(), 0);
6204 assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6205 assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6206 assert_eq!(updates.update_fail_htlcs.len(), 1);
6207 (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6208 },
6209 _ => panic!("Unexpected event"),
6210 };
6212 // Pass the failure messages back to nodes[0].
6213 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6214 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6216 // Complete the HTLC failure+removal process.
6217 let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6218 check_added_monitors!(nodes[0], 1);
6219 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6220 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6221 check_added_monitors!(nodes[1], 2);
6222 let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6223 assert_eq!(final_raa_event.len(), 1);
6224 let raa = match &final_raa_event[0] {
6225 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6226 _ => panic!("Unexpected event"),
6227 };
6228 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6229 expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6230 check_added_monitors!(nodes[0], 1);
6231 }
6233 #[test]
6234 fn test_payment_route_reaching_same_channel_twice() {
6235 // A route should not go through the same channel twice.
6236 // This is enforced when constructing a route.
6237 let chanmon_cfgs = create_chanmon_cfgs(2);
6238 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6239 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6240 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6241 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6243 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6244 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6245 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6247 // Extend the path by itself, essentially simulating route going through same channel twice
6248 let cloned_hops = route.paths[0].hops.clone();
6249 route.paths[0].hops.extend_from_slice(&cloned_hops);
6251 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6252 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6253 ), false, APIError::InvalidRoute { ref err },
6254 assert_eq!(err, &"Path went through the same channel twice"));
6257 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6258 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6259 //TODO: I don't believe this is explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux, leaving this as a TODO.
6261 #[test]
6262 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6263 //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6264 let chanmon_cfgs = create_chanmon_cfgs(2);
6265 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6266 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6267 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6268 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6270 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6271 route.paths[0].hops[0].fee_msat = 100;
6273 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6274 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6275 ), true, APIError::ChannelUnavailable { .. }, {});
6276 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6277 }
6279 #[test]
6280 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6281 //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6282 let chanmon_cfgs = create_chanmon_cfgs(2);
6283 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6284 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6285 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6286 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6288 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6289 route.paths[0].hops[0].fee_msat = 0;
6290 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6291 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6292 true, APIError::ChannelUnavailable { ref err },
6293 assert_eq!(err, "Cannot send 0-msat HTLC"));
6295 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6296 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6297 }
6299 #[test]
6300 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6301 //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6302 let chanmon_cfgs = create_chanmon_cfgs(2);
6303 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6304 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6305 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6306 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6308 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6309 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6310 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6311 check_added_monitors!(nodes[0], 1);
6312 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6313 updates.update_add_htlcs[0].amount_msat = 0;
6315 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6316 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
6317 check_closed_broadcast!(nodes[1], true).unwrap();
6318 check_added_monitors!(nodes[1], 1);
6319 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6320 [nodes[0].node.get_our_node_id()], 100000);
6321 }
6323 #[test]
6324 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6325 //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6326 //It is enforced when constructing a route.
6327 let chanmon_cfgs = create_chanmon_cfgs(2);
6328 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6329 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6330 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6331 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6333 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6334 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6335 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6336 route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6337 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6338 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6339 ), true, APIError::InvalidRoute { ref err },
6340 assert_eq!(err, &"Channel CLTV overflowed?"));
6344 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6345 //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6346 //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6347 //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6348 let chanmon_cfgs = create_chanmon_cfgs(2);
6349 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6350 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6351 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6352 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6353 let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6354 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
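// counterparty_max_accepted_htlcs is the limit nodes[1] advertised at open; each loop
// iteration below offers one HTLC (with ids increasing from 0, per BOLT 2) until that
// limit is reached, at which point one further send must fail.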
6356 // Fetch a route in advance as we will be unable to once we're unable to send.
6357 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6358 for i in 0..max_accepted_htlcs {
6359 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6360 let payment_event = {
6361 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6362 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6363 check_added_monitors!(nodes[0], 1);
6365 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6366 assert_eq!(events.len(), 1);
6367 if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6368 assert_eq!(htlcs[0].htlc_id, i);
6369 } else {
6370 assert!(false);
6371 }
6372 SendEvent::from_event(events.remove(0))
6373 };
6374 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6375 check_added_monitors!(nodes[1], 0);
6376 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6378 expect_pending_htlcs_forwardable!(nodes[1]);
6379 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6380 }
6381 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6382 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6383 ), true, APIError::ChannelUnavailable { .. }, {});
6385 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6386 }
6388 #[test]
6389 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6390 //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6391 let chanmon_cfgs = create_chanmon_cfgs(2);
6392 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6393 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6394 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6395 let channel_value = 100000;
6396 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6397 let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6399 send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6401 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6402 // Manually create a route over our max in flight (which our router normally automatically
6403 // limits us to).
6404 route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6405 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6406 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6407 ), true, APIError::ChannelUnavailable { .. }, {});
6408 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6410 send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6411 }
6413 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6414 #[test]
6415 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6416 //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6417 let chanmon_cfgs = create_chanmon_cfgs(2);
6418 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6419 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6420 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6421 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6422 let htlc_minimum_msat: u64;
6423 {
6424 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6425 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6426 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6427 htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
6428 }
6430 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6431 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6432 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6433 check_added_monitors!(nodes[0], 1);
6434 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6435 updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6436 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6437 assert!(nodes[1].node.list_channels().is_empty());
6438 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6439 assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6440 check_added_monitors!(nodes[1], 1);
6441 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6442 }
6444 #[test]
6445 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6446 //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6447 let chanmon_cfgs = create_chanmon_cfgs(2);
6448 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6449 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6450 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6451 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6453 let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6454 let channel_reserve = chan_stat.channel_reserve_msat;
6455 let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6456 let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6457 // The 2* and +1 are for the fee spike reserve.
6458 let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6460 let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6461 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6462 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6463 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6464 check_added_monitors!(nodes[0], 1);
6465 let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6467 // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6468 // at this time channel-initiatee receivers are not required to enforce that senders
6469 // respect the fee_spike_reserve.
6470 updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6471 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6473 assert!(nodes[1].node.list_channels().is_empty());
6474 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6475 assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6476 check_added_monitors!(nodes[1], 1);
6477 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6478 }
6480 #[test]
6481 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6482 //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6483 //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6484 let chanmon_cfgs = create_chanmon_cfgs(2);
6485 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6486 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6487 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6488 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6490 let send_amt = 3999999;
6491 let (mut route, our_payment_hash, _, our_payment_secret) =
6492 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6493 route.paths[0].hops[0].fee_msat = send_amt;
6494 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6495 let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
6496 let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6497 let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
6498 let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6499 &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None).unwrap();
6500 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
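// We build the onion by hand here (session key -> per-hop shared secrets via
// construct_onion_keys, per-hop payloads via build_onion_payloads, then the assembled
// packet via construct_onion_packet) so that we can craft raw update_add_htlc messages
// below, bypassing the sender-side checks that would otherwise stop us at the HTLC limit.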
6502 let mut msg = msgs::UpdateAddHTLC {
6506 payment_hash: our_payment_hash,
6507 cltv_expiry: htlc_cltv,
6508 onion_routing_packet: onion_packet.clone(),
6509 skimmed_fee_msat: None,
6510 blinding_point: None,
6514 msg.htlc_id = i as u64;
6515 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6517 msg.htlc_id = (50) as u64;
6518 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6520 assert!(nodes[1].node.list_channels().is_empty());
6521 let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6522 assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6523 check_added_monitors!(nodes[1], 1);
6524 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);

#[test]
fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
	//BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
}

#[test]
fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
	//BOLT 2 Requirement: if a sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	updates.update_add_htlcs[0].cltv_expiry = 500000000;
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
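
// Illustrative sketch (the helper and constant are ours, mirroring Bitcoin's nLockTime
// convention): locktime-style values below 500,000,000 denote block heights, and anything at
// or above that threshold denotes a UNIX timestamp, which is why the receiver above treats a
// cltv_expiry of 500000000 as "seconds instead of block height".
fn _example_cltv_expiry_is_block_height(cltv_expiry: u32) -> bool {
	const LOCKTIME_THRESHOLD: u32 = 500_000_000;
	cltv_expiry < LOCKTIME_THRESHOLD
}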

#[test]
fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
	//BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
	// We test this by first checking that repeated HTLCs pass commitment signature checks
	// after disconnect and that non-sequential htlc_ids result in a channel failure.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	//Disconnect and Reconnect
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
	assert_eq!(reestablish_1.len(), 1);
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
	assert_eq!(reestablish_2.len(), 1);
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

	//Resend HTLC
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
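
// Minimal sketch of the id bookkeeping this test exercises (a hypothetical helper, not LDK's
// actual implementation): a receiver expects htlc_ids to be assigned sequentially. A repeated
// id is only acceptable as a retransmission of an update the sender never saw acknowledged
// (the reconnection case above); once the HTLC is committed, any mismatch closes the channel.
fn _example_check_next_htlc_id(next_expected_id: u64, received_id: u64) -> Result<(), &'static str> {
	if received_id == next_expected_id {
		Ok(())
	} else {
		// Matches the "Remote skipped HTLC ID" closure asserted above.
		Err("Remote skipped HTLC ID")
	}
}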

#[test]
fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();

	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	let update_msg = msgs::UpdateFulfillHTLC{
		channel_id: chan.2,
		htlc_id: 0,
		payment_preimage: our_payment_preimage,
	};

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	let update_msg = msgs::UpdateFailHTLC{
		channel_id: chan.2,
		htlc_id: 0,
		reason: msgs::OnionErrorPacket { data: Vec::new()},
	};

	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	let update_msg = msgs::UpdateFailMalformedHTLC{
		channel_id: chan.2,
		htlc_id: 0,
		sha256_of_onion: [1; 32],
		failure_code: 0x8000,
	};

	nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
	//BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

	nodes[1].node.claim_funds(our_payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
		match events[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				update_fulfill_htlcs[0].clone()
			},
			_ => panic!("Unexpected event"),
		}
	};

	update_fulfill_msg.htlc_id = 1;

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
	//BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

	nodes[1].node.claim_funds(our_payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
		match events[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				update_fulfill_htlcs[0].clone()
			},
			_ => panic!("Unexpected event"),
		}
	};

	update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
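
// Small sketch of the BOLT 2 check exercised above (the helper is ours): a fulfill is only
// valid if the SHA256 of the provided preimage equals the HTLC's payment hash.
fn _example_preimage_matches(preimage: &PaymentPreimage, payment_hash: &PaymentHash) -> bool {
	use bitcoin::hashes::sha256::Hash as Sha256;
	PaymentHash(Sha256::hash(&preimage.0).to_byte_array()) == *payment_hash
}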

#[test]
fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
	//BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut update_msg: msgs::UpdateFailMalformedHTLC = {
		match events[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fulfill_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert_eq!(update_fail_malformed_htlcs.len(), 1);
				assert!(update_fee.is_none());
				update_fail_malformed_htlcs[0].clone()
			},
			_ => panic!("Unexpected event"),
		}
	};
	update_msg.failure_code &= !0x8000;
	nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
}
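
// Bit-level sketch of the requirement above (constants per BOLT 4; the helper is ours):
// update_fail_malformed_htlc must carry a failure_code with the BADONION bit set, which is
// exactly the bit the test clears with `update_msg.failure_code &= !0x8000;`.
fn _example_badonion_bit_set(failure_code: u16) -> bool {
	const BADONION: u16 = 0x8000;
	failure_code & BADONION != 0
}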

#[test]
fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
	//BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
	// * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.

	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);

	//First hop
	let mut payment_event = {
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	check_added_monitors!(nodes[1], 1);
	payment_event = SendEvent::from_event(events_2.remove(0));
	assert_eq!(payment_event.msgs.len(), 1);

	//Second Hop
	payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[2], 0);
	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);

	let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events_3.len(), 1);
	let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
		match events_3[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fulfill_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert_eq!(update_fail_malformed_htlcs.len(), 1);
				assert!(update_fee.is_none());
				(update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
			},
			_ => panic!("Unexpected event"),
		}
	};

	nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);

	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_4.len(), 1);

	//Confirm that handling the update_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
	match events_4[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
			assert!(update_add_htlcs.is_empty());
			assert!(update_fulfill_htlcs.is_empty());
			assert_eq!(update_fail_htlcs.len(), 1);
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());
		},
		_ => panic!("Unexpected event"),
	}

	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);

	// First hop
	let mut payment_event = {
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		SendEvent::from_node(&nodes[0])
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 1);
	payment_event = SendEvent::from_node(&nodes[1]);
	assert_eq!(payment_event.msgs.len(), 1);

	// Second Hop
	payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[2], 0);
	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);

	let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events_3.len(), 1);
	match events_3[0] {
		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
			let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
			// Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
			update_msg.failure_code |= 0x2000;

			nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
			commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
		},
		_ => panic!("Unexpected event"),
	}

	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
		vec![HTLCDestination::NextHopChannel {
			node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_4.len(), 1);
	check_added_monitors!(nodes[1], 1);

	match events_4[0] {
		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
		},
		_ => panic!("Unexpected event"),
	}

	let events_5 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 2);

	// Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
	// the node originating the error and its next hop.
	match events_5[0] {
		Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
		} => {
			assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
			assert!(is_permanent);
			assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
		},
		_ => panic!("Unexpected event"),
	}
	match events_5[1] {
		Event::PaymentFailed { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	// TODO: Test actual removal of channel from NetworkGraph when it's implemented.
}
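
// Worked sketch of the failure code asserted above (BOLT 4 flag values; the helper is ours):
// invalid_onion_version is BADONION|PERM|4, and the test additionally ORs in the NODE bit
// before forwarding, so the origin observes BADONION|PERM|NODE|4.
fn _example_invalid_onion_version_with_node_bit() -> u16 {
	const BADONION: u16 = 0x8000;
	const PERM: u16 = 0x4000;
	const NODE: u16 = 0x2000;
	BADONION | PERM | NODE | 4 // == 0xe004, i.e. Some(0x8000|0x4000|0x2000|4) above
}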

fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
	// Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment) reaches ANTI_REORG_DELAY.
	// We can have at most two valid local commitment txn, so both cases must be covered, and both txn must be checked to get them all, as
	// an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA.

	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;

	// We route 2 dust-HTLCs between A and B
	let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
	let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
	route_payment(&nodes[0], &[&nodes[1]], 1000000);

	// Cache one local commitment tx as previous
	let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);

	// Fail one HTLC to prune it in the will-be-latest-local commitment tx
	nodes[1].node.fail_htlc_backwards(&payment_hash_2);
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
	check_added_monitors!(nodes[1], 1);

	let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
	check_added_monitors!(nodes[0], 1);

	// Cache one local commitment tx as latest
	let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	match events[0] {
		MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		MessageSendEvent::UpdateHTLCs { node_id, .. } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}

	assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
	// Fail the 2 dust-HTLCs, move their failure in maturation buffer (htlc_updated_waiting_threshold_conf)
	if announce_latest {
		mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
	} else {
		mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
	}

	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);

	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	let events = nodes[0].node.get_and_clear_pending_events();
	// Only 2 PaymentPathFailed events should show up, over-dust HTLC has to be failed by timeout tx
	assert_eq!(events.len(), 4);
	let mut first_failed = false;
	for event in events {
		match event {
			Event::PaymentPathFailed { payment_hash, .. } => {
				if payment_hash == payment_hash_1 {
					assert!(!first_failed);
					first_failed = true;
				} else {
					assert_eq!(payment_hash, payment_hash_2);
				}
			},
			Event::PaymentFailed { .. } => {}
			_ => panic!("Unexpected event"),
		}
	}
}

#[test]
fn test_failure_delay_dust_htlc_local_commitment() {
	do_test_failure_delay_dust_htlc_local_commitment(true);
	do_test_failure_delay_dust_htlc_local_commitment(false);
}
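
// Rough sketch of the dust classification driving these tests (a simplification; a real
// holder-side check also accounts for HTLC-transaction fees, and the helper is ours): an HTLC
// whose value in satoshis falls below the relevant dust limit gets no on-chain output, so its
// failure can only be surfaced once the commitment transaction is ANTI_REORG_DELAY blocks deep.
fn _example_is_dust_htlc(amount_msat: u64, dust_limit_satoshis: u64) -> bool {
	amount_msat / 1000 < dust_limit_satoshis
}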

fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
	// Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
	// Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
	// Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
	// Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
	// Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
	// Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs

	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;

	let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
	let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
	let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);

	// We revoked bs_commitment_tx
	if revoked {
		let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
		claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
	}

	let mut timeout_tx = Vec::new();
	if local {
		// We fail dust-HTLC 1 by broadcast of local commitment tx
		mine_transaction(&nodes[0], &as_commitment_tx[0]);
		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
		expect_payment_failed!(nodes[0], dust_hash, false);

		connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);
		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
		timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
		assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
		// We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
		mine_transaction(&nodes[0], &timeout_tx[0]);
		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
		expect_payment_failed!(nodes[0], non_dust_hash, false);
	} else {
		// We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
		mine_transaction(&nodes[0], &bs_commitment_tx[0]);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);
		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);

		connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
		timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
			.filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
		check_spends!(timeout_tx[0], bs_commitment_tx[0]);
		// For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the
		// dust HTLC should have been failed.
		expect_payment_failed!(nodes[0], dust_hash, false);

		if !revoked {
			assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
		} else {
			assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
		}
		// We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
		mine_transaction(&nodes[0], &timeout_tx[0]);
		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
		expect_payment_failed!(nodes[0], non_dust_hash, false);
	}
}

#[test]
fn test_sweep_outbound_htlc_failure_update() {
	do_test_sweep_outbound_htlc_failure_update(false, true);
	do_test_sweep_outbound_htlc_failure_update(false, false);
	do_test_sweep_outbound_htlc_failure_update(true, false);
}

#[test]
fn test_user_configurable_csv_delay() {
	// We test that our channel constructors yield errors when we pass them an absurd CSV delay

	let mut low_our_to_self_config = UserConfig::default();
	low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
	let mut high_their_to_self_config = UserConfig::default();
	high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
	let user_cfgs = [Some(high_their_to_self_config.clone()), None];
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
	if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
		&low_our_to_self_config, 0, 42, None)
	{
		match error {
			APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
			_ => panic!("Unexpected event"),
		}
	} else { assert!(false) }

	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
	open_channel.common_fields.to_self_delay = 200;
	if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
		&low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
	{
		match error {
			ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
			_ => panic!("Unexpected event"),
		}
	} else { assert!(false); }

	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
	let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	accept_channel.common_fields.to_self_delay = 200;
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
	let reason_msg;
	if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
		match action {
			&ErrorAction::SendErrorMessage { ref msg } => {
				assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
				reason_msg = msg.data.clone();
			},
			_ => { panic!(); }
		}
	} else { panic!(); }
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);

	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
	open_channel.common_fields.to_self_delay = 200;
	if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
		&high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
	{
		match error {
			ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
			_ => panic!("Unexpected event"),
		}
	} else { assert!(false); }
}
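
// Condensed sketch of the two handshake checks exercised above (the helper is ours; the error
// strings echo the ones asserted): our own to_self_delay must be at least BREAKDOWN_TIMEOUT,
// and the peer's requested delay must not exceed our configured their_to_self_delay limit.
fn _example_validate_to_self_delays(
	our_to_self_delay: u16, their_to_self_delay: u16, max_their_to_self_delay: u16,
) -> Result<(), &'static str> {
	if our_to_self_delay < BREAKDOWN_TIMEOUT {
		return Err("Configured with an unreasonable our_to_self_delay putting user funds at risks");
	}
	if their_to_self_delay > max_their_to_self_delay {
		return Err("They wanted our payments to be delayed by a needlessly long period");
	}
	Ok(())
}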

#[test]
fn test_check_htlc_underpaying() {
	// Send a payment through A -> B, but A maliciously
	// sends a probe payment (i.e. less than the expected value)
	// to B, so B should refuse the payment.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let scorer = test_utils::TestScorer::new();
	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
	let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
		.with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
	let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
	let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
		None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
	let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
	let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	// Note that we first have to wait a random delay before processing the receipt of the HTLC,
	// and then will wait a second random delay before failing the HTLC back:
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);

	// nodes[1] is expecting a payment of 100_000 msat but received 10_000 msat,
	// so it should fail the HTLC as if we didn't know the preimage.
	nodes[1].node.process_pending_htlc_forwards();

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let (update_fail_htlc, commitment_signed) = match events[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert!(update_add_htlcs.is_empty());
			assert!(update_fulfill_htlcs.is_empty());
			assert_eq!(update_fail_htlcs.len(), 1);
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());
			(update_fail_htlcs[0].clone(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);

	// 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
	let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
	expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
	expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
}
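
// Sketch of the failure data layout asserted above (per BOLT 4 for
// incorrect_or_unknown_payment_details, failure code 0x4000|15; the helper is ours): an
// 8-byte big-endian htlc msat amount followed by a 4-byte big-endian block height.
fn _example_unknown_payment_failure_data(htlc_msat: u64, best_block_height: u32) -> Vec<u8> {
	let mut data = htlc_msat.to_be_bytes().to_vec();
	data.extend_from_slice(&best_block_height.to_be_bytes());
	data
}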

#[test]
fn test_announce_disable_channels() {
	// Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check for the
	// generated ChannelUpdates. Reconnect B, reestablish, and check that, after enough ticks, the
	// ChannelUpdates re-enabling the channels are generated.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Connect a dummy node for proper future events broadcasting
	connect_dummy_node(&nodes[0]);

	create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 0);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
		nodes[0].node.timer_tick_occurred();
	}
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
	let mut chans_disabled = new_hash_map();
	for e in msg_events {
		match e {
			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
				assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
				// Check that each channel gets updated exactly once
				if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
					panic!("Generated ChannelUpdate for wrong chan!");
				}
			},
			_ => panic!("Unexpected event"),
		}
	}
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
	assert_eq!(reestablish_1.len(), 3);
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
	assert_eq!(reestablish_2.len(), 3);

	// Reestablish chan_1
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
	// Reestablish chan_2
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
	// Reestablish chan_3
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

	for _ in 0..ENABLE_GOSSIP_TICKS {
		nodes[0].node.timer_tick_occurred();
	}
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	nodes[0].node.timer_tick_occurred();
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
	for e in msg_events {
		match e {
			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
				assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
				match chans_disabled.remove(&msg.contents.short_channel_id) {
					// Each update should have a higher timestamp than the previous one, replacing
					// the original.
					Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
					None => panic!("Generated ChannelUpdate for wrong chan!"),
				}
			},
			_ => panic!("Unexpected event"),
		}
	}
	// Check that each channel gets updated exactly once
	assert!(chans_disabled.is_empty());
}
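
// Bit-level sketch of the flag checked above (per BOLT 7 channel_update semantics; the helper
// is ours): bit 0 of channel_flags encodes the direction, and bit 1 is the "disable" bit
// toggled by the two timer-driven broadcasts in this test.
fn _example_channel_update_disabled(channel_flags: u8) -> bool {
	channel_flags & (1 << 1) != 0
}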

#[test]
fn test_bump_penalty_txn_on_revoked_commitment() {
	// In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
	// we're able to claim outputs on the revoked commitment transaction before timelocks expiration

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);

	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
		.with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
	send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);

	let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
	// Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
	assert_eq!(revoked_txn[0].output.len(), 4);
	assert_eq!(revoked_txn[0].input.len(), 1);
	assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
	let revoked_txid = revoked_txn[0].txid();

	let mut penalty_sum = 0;
	for outp in revoked_txn[0].output.iter() {
		if outp.script_pubkey.is_v0_p2wsh() {
			penalty_sum += outp.value;
		}
	}

	// Connect blocks to change height_timer range to see if we use right soonest_timelock
	let header_114 = connect_blocks(&nodes[1], 14);

	// Actually revoke tx by claiming a HTLC
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
	connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
	check_added_monitors!(nodes[1], 1);

	// One or more justice tx should have been broadcast, check it
	let penalty_1;
	let mut feerate_1 = 0;
	{
		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
		assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
		assert_eq!(node_txn[0].output.len(), 1);
		check_spends!(node_txn[0], revoked_txn[0]);
		let fee_1 = penalty_sum - node_txn[0].output[0].value;
		feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
		penalty_1 = node_txn[0].txid();
		node_txn.clear();
	};

	// After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
	connect_blocks(&nodes[1], 15);
	let mut penalty_2 = penalty_1;
	let mut feerate_2 = 0;
	{
		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		if node_txn[0].input[0].previous_output.txid == revoked_txid {
			assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
			assert_eq!(node_txn[0].output.len(), 1);
			check_spends!(node_txn[0], revoked_txn[0]);
			penalty_2 = node_txn[0].txid();
			// Verify new bumped tx is different from last claiming transaction, we don't want spurious rebroadcast
			assert_ne!(penalty_2, penalty_1);
			let fee_2 = penalty_sum - node_txn[0].output[0].value;
			feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
			// Verify 25% bump heuristic
			assert!(feerate_2 * 100 >= feerate_1 * 125);
		}
		node_txn.clear();
	}
	assert_ne!(feerate_2, 0);

	// After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
	connect_blocks(&nodes[1], 1);
	let penalty_3;
	let mut feerate_3 = 0;
	{
		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		if node_txn[0].input[0].previous_output.txid == revoked_txid {
			assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
			assert_eq!(node_txn[0].output.len(), 1);
			check_spends!(node_txn[0], revoked_txn[0]);
			penalty_3 = node_txn[0].txid();
			// Verify new bumped tx is different from last claiming transaction, we don't want spurious rebroadcast
			assert_ne!(penalty_3, penalty_2);
			let fee_3 = penalty_sum - node_txn[0].output[0].value;
			feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
			// Verify 25% bump heuristic
			assert!(feerate_3 * 100 >= feerate_2 * 125);
		}
		node_txn.clear();
	}
	assert_ne!(feerate_3, 0);

	nodes[1].node.get_and_clear_pending_events();
	nodes[1].node.get_and_clear_pending_msg_events();
}
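
// Sketch of the 25% bump heuristic asserted above (the helper and parameter names are ours):
// each rebroadcast justice transaction must raise the feerate by at least a quarter over the
// previous claim so the RBF replacement is worth relaying.
fn _example_feerate_bump_sufficient(old_feerate: u64, new_feerate: u64) -> bool {
	new_feerate * 100 >= old_feerate * 125
}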
7530 fn test_bump_penalty_txn_on_revoked_htlcs() {
7531 // In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to sure
7532 // we're able to claim outputs on revoked HTLC transactions before timelocks expiration
7534 let mut chanmon_cfgs = create_chanmon_cfgs(2);
7535 chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7536 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7537 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7538 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7540 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7541 // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7542 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7543 let scorer = test_utils::TestScorer::new();
7544 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7545 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7546 let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
7547 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7548 let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7549 let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
7550 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7551 let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7552 let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
7553 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7554 let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
7556 let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7557 assert_eq!(revoked_local_txn[0].input.len(), 1);
7558 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7560 // Revoke local commitment tx
7561 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7563 // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7564 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7565 check_closed_broadcast!(nodes[1], true);
7566 check_added_monitors!(nodes[1], 1);
7567 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7568 connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7570 let revoked_htlc_txn = {
7571 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7572 assert_eq!(txn.len(), 2);
7574 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7575 assert_eq!(txn[0].input.len(), 1);
7576 check_spends!(txn[0], revoked_local_txn[0]);
7578 assert_eq!(txn[1].input.len(), 1);
7579 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7580 assert_eq!(txn[1].output.len(), 1);
7581 check_spends!(txn[1], revoked_local_txn[0]);
	// Broadcast set of revoked txn on A
	let hash_128 = connect_blocks(&nodes[0], 40);
	let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
	connect_block(&nodes[0], &block_11);
	let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
	connect_block(&nodes[0], &block_129);
	let events = nodes[0].node.get_and_clear_pending_events();
	expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
	match events.last().unwrap() {
		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
		_ => panic!("Unexpected event"),
	}
	let first;
	let feerate_1;
	let penalty_txn;
	{
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
		// Verify claim txn are spending revoked HTLC txn

		// node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
		// Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
		// which are included in the same block (they are broadcasted because we scan the
		// transactions linearly and generate claims as we go, they likely should be removed in the
		// future).
		assert_eq!(node_txn[0].input.len(), 1);
		check_spends!(node_txn[0], revoked_local_txn[0]);
		assert_eq!(node_txn[1].input.len(), 1);
		check_spends!(node_txn[1], revoked_local_txn[0]);
		assert_eq!(node_txn[2].input.len(), 1);
		check_spends!(node_txn[2], revoked_local_txn[0]);

		// Each of the three justice transactions claims a separate (single) output of the three
		// available, which we check here:
		assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
		assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
		assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);

		assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
		assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);

		// node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
		// output each, checked above).
		assert_eq!(node_txn[3].input.len(), 2);
		assert_eq!(node_txn[3].output.len(), 1);
		check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);

		first = node_txn[3].txid();
		// Store both feerates for later comparison
		let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
		feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
		penalty_txn = vec![node_txn[2].clone()];
		node_txn.clear();
	}
	// Connect one more block to see if bumped penalty are issued for HTLC txn
	let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
	connect_block(&nodes[0], &block_130);
	let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
	connect_block(&nodes[0], &block_131);

	// A few more blocks to confirm penalty txn
	connect_blocks(&nodes[0], 4);
	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	let header_144 = connect_blocks(&nodes[0], 9);
	let node_txn = {
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);

		assert_eq!(node_txn[0].input.len(), 2);
		check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
		// Verify bumped tx is different and the 25% bump heuristic is respected
		assert_ne!(first, node_txn[0].txid());
		let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
		let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
		assert!(feerate_2 * 100 > feerate_1 * 125);
		let txn = vec![node_txn[0].clone()];
		node_txn.clear();
		txn
	};
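	// The bumped claim must beat the prior feerate by at least 25% (the assertion above
	// checks feerate_2 * 100 > feerate_1 * 125). For illustration (numbers hypothetical,
	// not taken from this test): if the first justice tx paid 1_000 sats of fee over
	// 500 WU, feerate_1 = 1_000 * 1000 / 500 = 2_000 sat/kWU, so a valid bump must
	// exceed 2_500 sat/kWU.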
	// Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
	connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
	connect_blocks(&nodes[0], 20);
	{
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		// We verify that no new transaction has been broadcast: previously we were buggy here
		// and failed to track remote HTLC outputs for monitoring (see #411), which meant we
		// never saw a justice tx spend them and bumped justice txn were generated forever
		// instead of being safely cleaned up after confirmation plus ANTI_REORG_DELAY blocks.
		// Spending the revoked HTLC outputs via the claiming transaction should remove the
		// claim request as expected and dry up bumped justice generation.
		assert_eq!(node_txn.len(), 0);
		node_txn.clear();
	}
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
}
#[test]
fn test_bump_penalty_txn_on_remote_commitment() {
	// If claim txn have feerates too low to get into mempools, RBF-bump them to be sure we're
	// able to claim outputs on the remote commitment transaction before its timelocks expire.
	// Provide preimage for one
	// Check aggregation

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
	route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
	// Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
	let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
	assert_eq!(remote_txn[0].output.len(), 4);
	assert_eq!(remote_txn[0].input.len(), 1);
	assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());

	// Claim a HTLC without revocation (provide B monitor with preimage)
	nodes[1].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
	mine_transaction(&nodes[1], &remote_txn[0]);
	check_added_monitors!(nodes[1], 2);
	connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
	// One or more claim txn should have been broadcast; check them
	let preimage;
	let preimage_bump;
	let timeout;
	let feerate_timeout;
	let feerate_preimage;
	{
		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		// 3 transactions including:
		//   preimage and timeout sweeps from remote commitment + preimage sweep bump
		assert_eq!(node_txn.len(), 3);
		assert_eq!(node_txn[0].input.len(), 1);
		assert_eq!(node_txn[1].input.len(), 1);
		assert_eq!(node_txn[2].input.len(), 1);
		check_spends!(node_txn[0], remote_txn[0]);
		check_spends!(node_txn[1], remote_txn[0]);
		check_spends!(node_txn[2], remote_txn[0]);

		preimage = node_txn[0].txid();
		let index = node_txn[0].input[0].previous_output.vout;
		let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
		feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();

		let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
			(node_txn[2].clone(), node_txn[1].clone())
		} else {
			(node_txn[1].clone(), node_txn[2].clone())
		};

		preimage_bump = preimage_bump_tx;
		check_spends!(preimage_bump, remote_txn[0]);
		assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);

		timeout = timeout_tx.txid();
		let index = timeout_tx.input[0].previous_output.vout;
		let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
		feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();

		node_txn.clear();
	}
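	// Note the feerates computed above are in sats per 1000 weight units (fee * 1000 /
	// tx weight). Purely for illustration: a hypothetical 5_000-sat fee on a 600-WU sweep
	// would be 5_000 * 1000 / 600 = 8_333 sat/kWU.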
	assert_ne!(feerate_timeout, 0);
	assert_ne!(feerate_preimage, 0);

	// After the height timer expires, new bumped claim txn should have been broadcast; check them
	connect_blocks(&nodes[1], 1);
	{
		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		assert_eq!(node_txn[0].input.len(), 1);
		assert_eq!(preimage_bump.input.len(), 1);
		check_spends!(node_txn[0], remote_txn[0]);
		check_spends!(preimage_bump, remote_txn[0]);

		let index = preimage_bump.input[0].previous_output.vout;
		let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
		let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
		assert!(new_feerate * 100 > feerate_timeout * 125);
		assert_ne!(timeout, preimage_bump.txid());

		let index = node_txn[0].input[0].previous_output.vout;
		let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
		let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
		assert!(new_feerate * 100 > feerate_preimage * 125);
		assert_ne!(preimage, node_txn[0].txid());
		node_txn.clear();
	}
	nodes[1].node.get_and_clear_pending_events();
	nodes[1].node.get_and_clear_pending_msg_events();
}
#[test]
fn test_counterparty_raa_skip_no_crash() {
	// Previously, if our counterparty sent two RAAs in a row without us having provided a
	// commitment transaction, we would have happily carried on and provided them the next
	// commitment transaction based on one RAA forward. This would probably eventually have led to
	// channel closure, but it would not have resulted in funds loss. Still, our
	// TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
	// simply check that the channel is closed in response to such an RAA, but don't check whether
	// we decide to punish our counterparty for revoking their funds (as we don't currently do).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let per_commitment_secret;
	let next_per_commitment_point;
	{
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let keys = guard.channel_by_id.get_mut(&channel_id).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap().get_signer();

		const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

		// Make the signer believe we got a counterparty signature, so that it allows the revocation
		keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
		per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);

		// Must revoke without gaps
		keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
		keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);

		keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
		next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
			&SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
	}
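	// A per-commitment point is simply the public key corresponding to the released
	// per-commitment secret (point = secret * G), which is why we can rebuild the "next"
	// point above directly from the secret the signer hands us.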
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
		&msgs::RevokeAndACK {
			channel_id,
			per_commitment_secret,
			next_per_commitment_point,
			#[cfg(taproot)]
			next_local_nonce: None,
		});
	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() },
		[nodes[0].node.get_our_node_id()], 100000);
}
#[test]
fn test_bump_txn_sanitize_tracking_maps() {
	// Sanitizing pending_claim_requests and claimable_outpoints used to be buggy; verify we
	// clean them up right after ANTI_REORG_DELAY expires.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
	// Lock HTLC in both directions
	let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
	let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
	let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());

	// Revoke local commitment tx
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);

	// Broadcast set of revoked txn on A
	connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);

	mine_transaction(&nodes[0], &revoked_local_txn[0]);
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
	let penalty_txn = {
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice txn * 3
		check_spends!(node_txn[0], revoked_local_txn[0]);
		check_spends!(node_txn[1], revoked_local_txn[0]);
		check_spends!(node_txn[2], revoked_local_txn[0]);
		let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
		node_txn.clear();
		penalty_txn
	};

	connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	{
		let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
		assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
		assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
	}
}
#[test]
fn test_channel_conf_timeout() {
	// Tests that, for inbound channels, we give up on them if the funding transaction does not
	// confirm within 2016 blocks, as recommended by BOLT 2.
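	// (2016 blocks is one difficulty-adjustment period, i.e. roughly two weeks.)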
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);

	// The outbound node should wait forever for confirmation:
	// This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
	// copied here instead of directly referencing the constant.
	connect_blocks(&nodes[0], 2016);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// The inbound node should fail the channel after exactly 2016 blocks
	connect_blocks(&nodes[1], 2015);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	connect_blocks(&nodes[1], 1);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
	let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(close_ev.len(), 1);
	match close_ev[0] {
		MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
		},
		_ => panic!("Unexpected event"),
	}
}
#[test]
fn test_override_channel_config() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Node0 initiates a channel to node1 using the override config.
	let mut override_config = UserConfig::default();
	override_config.channel_handshake_config.our_to_self_delay = 200;

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();

	// Assert the channel created by node0 is using the override config.
	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	assert_eq!(res.common_fields.channel_flags, 0);
	assert_eq!(res.common_fields.to_self_delay, 200);
}
#[test]
fn test_override_0msat_htlc_minimum() {
	let mut zero_config = UserConfig::default();
	zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	assert_eq!(res.common_fields.htlc_minimum_msat, 1);

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
	let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	assert_eq!(res.common_fields.htlc_minimum_msat, 1);
}
#[test]
fn test_channel_update_has_correct_htlc_maximum_msat() {
	// Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
	// BOLT 7 specifies that if present, `htlc_maximum_msat`:
	// 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
	//    90% of the `channel_value`.
	// 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.

	let mut config_30_percent = UserConfig::default();
	config_30_percent.channel_handshake_config.announced_channel = true;
	config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
	let mut config_50_percent = UserConfig::default();
	config_50_percent.channel_handshake_config.announced_channel = true;
	config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
	let mut config_95_percent = UserConfig::default();
	config_95_percent.channel_handshake_config.announced_channel = true;
	config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
	let mut config_100_percent = UserConfig::default();
	config_100_percent.channel_handshake_config.announced_channel = true;
	config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;

	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let channel_value_satoshis = 100000;
	let channel_value_msat = channel_value_satoshis * 1000;
	let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
	let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
	let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;

	let (node_0_chan_update, node_1_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
	let (node_2_chan_update, node_3_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);

	// Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
	// that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
	assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
	// Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
	// that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
	assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);

	// Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
	// the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%) exceeds 90% of the
	// `channel_value`.
	assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
	// Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
	// the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%) exceeds 90% of the
	// `channel_value`.
	assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
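	// (Concretely: channel_value_msat = 100_000 sat * 1000 = 100_000_000 msat, so the caps
	// computed above are 30_000_000, 50_000_000 and 90_000_000 msat for the 30%, 50% and
	// 90% cases respectively.)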
}

#[test]
fn test_manually_accept_inbound_channel_request() {
	let mut manually_accept_conf = UserConfig::default();
	manually_accept_conf.manually_accept_inbound_channels = true;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);

	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
	// accepting the inbound channel request.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
		}
		_ => panic!("Unexpected event"),
	}

	let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(accept_msg_ev.len(), 1);

	match accept_msg_ev[0] {
		MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
		}
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();

	let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(close_msg_ev.len(), 1);

	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::ChannelClosed { user_channel_id, .. } => {
			assert_eq!(user_channel_id, 23);
		}
		_ => panic!("Unexpected event"),
	}
}
#[test]
fn test_manually_reject_inbound_channel_request() {
	let mut manually_accept_conf = UserConfig::default();
	manually_accept_conf.manually_accept_inbound_channels = true;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);

	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
	// rejecting the inbound channel request.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
		}
		_ => panic!("Unexpected event"),
	}

	let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(close_msg_ev.len(), 1);

	match close_msg_ev[0] {
		MessageSendEvent::HandleError { ref node_id, .. } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}

	// There should be no more events to process, as the channel was never opened.
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
#[test]
fn test_can_not_accept_inbound_channel_twice() {
	let mut manually_accept_conf = UserConfig::default();
	manually_accept_conf.manually_accept_inbound_channels = true;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);

	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
	// accepting the inbound channel request.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
			match api_res {
				Err(APIError::APIMisuseError { err }) => {
					assert_eq!(err, "No such channel awaiting to be accepted.");
				},
				Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
				Err(e) => panic!("Unexpected Error {:?}", e),
			}
		}
		_ => panic!("Unexpected event"),
	}

	// Ensure that the channel wasn't closed after attempting to accept it twice.
	let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(accept_msg_ev.len(), 1);

	match accept_msg_ev[0] {
		MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
		}
		_ => panic!("Unexpected event"),
	}
}
#[test]
fn test_can_not_accept_unknown_inbound_channel() {
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);

	let unknown_channel_id = ChannelId::new_zero();
	let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
	match api_res {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(err, "No such channel awaiting to be accepted.");
		},
		Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
		Err(e) => panic!("Unexpected Error: {:?}", e),
	}
}
#[test]
fn test_onion_value_mpp_set_calculation() {
	// Test that we use the onion value `amt_to_forward` when
	// calculating whether we've reached the `total_msat` of an MPP
	// by having a routing node forward more than `amt_to_forward`
	// and checking that the receiving node doesn't generate
	// a PaymentClaimable event too early
	let node_count = 4;
	let chanmon_cfgs = create_chanmon_cfgs(node_count);
	let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
	let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);

	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;

	let total_msat = 100_000;
	let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
	let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
	let sample_path = route.paths.pop().unwrap();

	let mut path_1 = sample_path.clone();
	path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
	path_1.hops[0].short_channel_id = chan_1_id;
	path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
	path_1.hops[1].short_channel_id = chan_3_id;
	path_1.hops[1].fee_msat = 100_000;
	route.paths.push(path_1);

	let mut path_2 = sample_path.clone();
	path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
	path_2.hops[0].short_channel_id = chan_2_id;
	path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
	path_2.hops[1].short_channel_id = chan_4_id;
	path_2.hops[1].fee_msat = 1_000;
	route.paths.push(path_2);
	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
	let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
	nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
	check_added_monitors!(nodes[0], expected_paths.len());

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), expected_paths.len());

	let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
	let mut payment_event = SendEvent::from_event(ev);
	let mut prev_node = &nodes[0];

	for (idx, &node) in expected_paths[0].iter().enumerate() {
		assert_eq!(node.node.get_our_node_id(), payment_event.node_id);

		if idx == 0 { // routing node
			let session_priv = [3; 32];
			let height = nodes[0].best_block_info().1;
			let session_priv = SecretKey::from_slice(&session_priv).unwrap();
			let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
			let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
			let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
				&recipient_onion_fields, height + 1, &None).unwrap();
			// Edit amt_to_forward to simulate the sender having set
			// the final amount and the routing node taking less fee
			if let msgs::OutboundOnionPayload::Receive {
				ref mut sender_intended_htlc_amt_msat, ..
			} = onion_payloads[1] {
				*sender_intended_htlc_amt_msat = 99_000;
			}
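			// The receiver must credit the onion's sender-intended amount (99_000 msat)
			// toward total_msat rather than the 100_000 msat actually delivered; otherwise
			// this first path alone would appear to complete the payment and trigger
			// PaymentClaimable too early.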
			let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
			payment_event.msgs[0].onion_routing_packet = new_onion_packet;
		}

		node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
		check_added_monitors!(node, 0);
		commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
		expect_pending_htlcs_forwardable!(node);

		if idx == 0 {
			let mut events_2 = node.node.get_and_clear_pending_msg_events();
			assert_eq!(events_2.len(), 1);
			check_added_monitors!(node, 1);
			payment_event = SendEvent::from_event(events_2.remove(0));
			assert_eq!(payment_event.msgs.len(), 1);
		} else {
			let events_2 = node.node.get_and_clear_pending_events();
			assert!(events_2.is_empty());
		}

		prev_node = node;
	}

	let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);

	claim_payment_along_route(
		ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage)
	);
}
fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
	let routing_node_count = msat_amounts.len();
	let node_count = routing_node_count + 2;

	let chanmon_cfgs = create_chanmon_cfgs(node_count);
	let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
	let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);

	let src_idx = 0;
	let dst_idx = 1;
	// Create channels for each amount
	let mut expected_paths = Vec::with_capacity(routing_node_count);
	let mut src_chan_ids = Vec::with_capacity(routing_node_count);
	let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
	for i in 0..routing_node_count {
		let routing_node = 2 + i;
		let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
		src_chan_ids.push(src_chan_id);
		let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
		dst_chan_ids.push(dst_chan_id);
		let path = vec![&nodes[routing_node], &nodes[dst_idx]];
		expected_paths.push(path);
	}
	let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
	// Create a route for each amount
	let example_amount = 100000;
	let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
	let sample_path = route.paths.pop().unwrap();
	for i in 0..routing_node_count {
		let routing_node = 2 + i;
		let mut path = sample_path.clone();
		path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
		path.hops[0].short_channel_id = src_chan_ids[i];
		path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
		path.hops[1].short_channel_id = dst_chan_ids[i];
		path.hops[1].fee_msat = msat_amounts[i];
		route.paths.push(path);
	}
	// Send payment with manually set total_msat
	let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
	let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
	nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
	check_added_monitors!(nodes[src_idx], expected_paths.len());

	let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), expected_paths.len());
	let mut amount_received = 0;
	for (path_idx, expected_path) in expected_paths.iter().enumerate() {
		let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);

		let current_path_amount = msat_amounts[path_idx];
		amount_received += current_path_amount;
		let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
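		// Only the path that pushes the running total across total_msat should make the
		// payment claimable. E.g., for do_test_overshoot_mpp(&[100_000, 101_000], 200_000),
		// the first path leaves 100_000 < 200_000 received, while the second (cumulative
		// 201_000 >= 200_000) completes the payment despite overshooting the total.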
		pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
	}

	claim_payment_along_route(
		ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage)
	);
}

#[test]
fn test_overshoot_mpp() {
	do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
	do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
}
#[test]
fn test_simple_mpp() {
	// Simple test of sending a multi-path payment.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;

	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_1_id;
	route.paths[0].hops[1].short_channel_id = chan_3_id;
	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1].hops[0].short_channel_id = chan_2_id;
	route.paths[1].hops[1].short_channel_id = chan_4_id;
	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
	claim_payment_along_route(
		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage)
	);
}
#[test]
fn test_preimage_storage() {
	// Simple test of payment preimage storage allowing no client-side storage to claim payments
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;

	{
		let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
		let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
		nodes[0].node.send_payment_with_route(&route, payment_hash,
			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		let mut payment_event = SendEvent::from_event(events.pop().unwrap());
		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	}
	// Note that after leaving the above scope we have no knowledge of any arguments or return
	// values from previous calls.
	expect_pending_htlcs_forwardable!(nodes[1]);
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { ref purpose, .. } => {
			match purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
					claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}
}
#[test]
fn test_bad_secret_hash() {
	// Simple test of unregistered payment hash/invalid payment secret handling
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;

	let random_payment_hash = PaymentHash([42; 32]);
	let random_payment_secret = PaymentSecret([43; 32]);
	let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
	let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);

	// All the below cases should end up being handled exactly identically, so we macro the
	// resulting events.
	macro_rules! handle_unknown_invalid_payment_data {
		($payment_hash: expr) => {
			check_added_monitors!(nodes[0], 1);
			let mut events = nodes[0].node.get_and_clear_pending_msg_events();
			let payment_event = SendEvent::from_event(events.pop().unwrap());
			nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
			commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

			// We have to forward pending HTLCs once to process the receipt of the HTLC and then
			// again to process the pending backwards-failure of the HTLC
			expect_pending_htlcs_forwardable!(nodes[1]);
			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
			check_added_monitors!(nodes[1], 1);

			// We should fail the payment back
			let mut events = nodes[1].node.get_and_clear_pending_msg_events();
			match events.pop().unwrap() {
				MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
					nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
					commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
				},
				_ => panic!("Unexpected event"),
			}
		}
	}

	let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
	// Error data is the HTLC value (100,000 msat) and the current block height
	let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
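	// (Layout of the error data: the 8-byte big-endian HTLC amount (100_000 msat =
	// 0x0001_86a0) followed by the 4-byte big-endian block height, which here is
	// CHAN_CONFIRM_DEPTH and fits in the final byte.)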
	// Send a payment with the right payment hash but the wrong payment secret
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	handle_unknown_invalid_payment_data!(our_payment_hash);
	expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);

	// Send a payment with a random payment hash, but the right payment secret
	nodes[0].node.send_payment_with_route(&route, random_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
	handle_unknown_invalid_payment_data!(random_payment_hash);
	expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);

	// Send a payment with a random payment hash and random payment secret
	nodes[0].node.send_payment_with_route(&route, random_payment_hash,
		RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
	handle_unknown_invalid_payment_data!(random_payment_hash);
	expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
}
#[test]
fn test_update_err_monitor_lockdown() {
	// Our monitor will lock updates of the local commitment transaction once a broadcast
	// condition has been fulfilled (either a force-close from Channel or a block height
	// requiring an HTLC-timeout). Trying to update the monitor after lockdown should fail,
	// surfacing as a ChannelMonitorUpdateStatus::InProgress from the watchtower's copy.
	//
	// This scenario may happen in a watchtower setup, where the watchtower processes a block
	// height triggering a timeout while a slow-block-processing ChannelManager receives a
	// locally-signed commitment at the same time.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channel
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };

	// Rebalance the network to generate an HTLC in the two directions
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);

	// Route an HTLC from node 0 to node 1 (but don't settle)
	let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);

	// Copy ChainMonitor to simulate a watchtower and advance node 0's block height until its
	// ChannelMonitor times out the HTLC onchain
	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
	let persister = test_utils::TestPersister::new();
	let watchtower = {
		let new_monitor = {
			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
			let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
			assert!(new_monitor == *monitor);
			new_monitor
		};
		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
		assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
		watchtower
	};
	let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
	// Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
	// transaction lock time requirements here.
	chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
	watchtower.chain_monitor.block_connected(&block, 200);
	// Try to update ChannelMonitor
	nodes[1].node.claim_funds(preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);

	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	{
		let mut node_0_per_peer_lock;
		let mut node_0_peer_state_lock;
		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
				assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
				assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
			} else { assert!(false); }
		} else {
			assert!(false);
		}
	}
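	// The watchtower's copy rejects the update (InProgress): it has already seen a block
	// satisfying its broadcast condition and has gone to chain on the old state, so it
	// refuses further commitment updates. Our local ChainMonitor, which hasn't seen that
	// height, accepts the same update (Completed).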
	// Our local monitor is in sync and hasn't yet processed the timeout
	check_added_monitors!(nodes[0], 1);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
}
#[test]
fn test_concurrent_monitor_claim() {
	// Watchtower A receives block, broadcasts state N, then channel receives new state N+1,
	// sending it to both watchtowers. Bob accepts N+1, then receives block and broadcasts
	// the latest state N+1. Alice rejects state N+1, but Bob has already broadcast it and
	// state N+1 confirms. Alice claims the output from state N+1.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channel
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };

	// Rebalance the network to generate an HTLC in the two directions
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);

	// Route an HTLC from node 0 to node 1 (but don't settle)
	route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;

	// Copy ChainMonitor to simulate watchtower Alice and advance the block height until her
	// ChannelMonitor times out the HTLC onchain
	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
	let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
	let persister = test_utils::TestPersister::new();
	let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
		Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
	);
	let watchtower_alice = {
		let new_monitor = {
			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
			let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
			assert!(new_monitor == *monitor);
			new_monitor
		};
		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
		assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
		watchtower
	};
	let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
	// Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
	// requirements here.
	const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
	alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
	watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
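	// HTLC_TIMEOUT_BROADCAST is (roughly) the height at which the pending HTLC is far enough
	// past its CLTV expiry, by the LATENCY_GRACE_PERIOD_BLOCKS grace window, that the monitor
	// gives up on an off-chain resolution and goes to chain; connecting a block at this
	// height makes Alice's monitor broadcast.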
	// Watchtower Alice should have broadcast a commitment/HTLC-timeout
	{
		let mut txn = alice_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 2);
		check_spends!(txn[0], chan_1.3);
		check_spends!(txn[1], txn[0]);
	}

	// Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
	let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
	let persister = test_utils::TestPersister::new();
	let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
	let watchtower_bob = {
		let new_monitor = {
			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
			let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
			assert!(new_monitor == *monitor);
			new_monitor
		};
		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
		assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
		watchtower
	};
	watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
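	// Bob is deliberately kept one block behind Alice (HTLC_TIMEOUT_BROADCAST - 1), so his
	// monitor has not yet hit its broadcast condition and will still accept the next
	// commitment update below, where Alice's will not.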
	// Route another payment to generate another update with still previous HTLC pending
	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
	nodes[1].node.send_payment_with_route(&route, payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(updates.update_add_htlcs.len(), 1);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	{
		let mut node_0_per_peer_lock;
		let mut node_0_peer_state_lock;
		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
				// Watchtower Alice should already have seen the block and reject the update
				assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
				assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
				assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
			} else { assert!(false); }
		} else {
			assert!(false);
		}
	}
	// Our local monitor is in sync and hasn't yet processed the timeout
	check_added_monitors!(nodes[0], 1);

	// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
	watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);

	// Watchtower Bob should have broadcast a commitment/HTLC-timeout
	let bob_state_y;
	{
		let mut txn = bob_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 2);
		bob_state_y = txn.remove(0);
	}

	// We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
	let height = HTLC_TIMEOUT_BROADCAST + 1;
	connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
	check_closed_broadcast(&nodes[0], 1, true);
	check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
		[nodes[1].node.get_our_node_id()], 100000);
	watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
	check_added_monitors(&nodes[0], 1);
	{
		let htlc_txn = alice_broadcaster.txn_broadcast();
		assert_eq!(htlc_txn.len(), 1);
		check_spends!(htlc_txn[0], bob_state_y);
	}
}
#[test]
fn test_pre_lockin_no_chan_closed_update() {
	// Test that if a peer closes a channel in response to a funding_created message we don't
	// generate a channel update (as the channel cannot appear on chain without a funding_signed
	// message).
	//
	// Doing so would imply a channel monitor update before the initial channel monitor
	// registration, violating our API guarantees.
	//
	// Previously, full_stack_target managed to hit this case by opening then closing a channel,
	// then opening a second channel with the same funding output as the first (which is not
	// rejected because the first channel does not exist in the ChannelManager) and closing it
	// before receiving funding_signed.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create an initial channel
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
	let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);

	// Move the first channel through the funding flow...
	let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
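	// (A v1 channel_id is the funding txid with the 16-bit funding output index XORed into
	// its lower two bytes, per BOLT 2; v1_from_funding_outpoint computes exactly that from
	// the funding_created fields.)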
	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
	assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
		[nodes[1].node.get_our_node_id()], 100000);
}
8723 fn test_htlc_no_detection() {
8724 // This test is a mutation to underscore the detection logic bug we had
8725 // before #653. HTLC value routed is above the remaining balance, thus
8726 // inverting HTLC and `to_remote` output. HTLC will come second and
8727 // it wouldn't be seen by pre-#653 detection as we were enumerate()'ing
8728 // on a watched outputs vector (Vec<TxOut>) thus implicitly relying on
8729 // outputs order detection for correct spending children filtring.
8731 let chanmon_cfgs = create_chanmon_cfgs(2);
8732 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8733 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8734 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8736 // Create some initial channels
8737 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8739 send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
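// Route an HTLC worth more than the remaining balance so the HTLC output sorts after
// `to_remote` in the commitment transaction, which is exactly the ordering the pre-#653
// code mishandled.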
let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
assert_eq!(local_txn[0].input.len(), 1);
assert_eq!(local_txn[0].output.len(), 3);
check_spends!(local_txn[0], chan_1.3);

// Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
connect_block(&nodes[0], &block);
// We deliberately connect the local tx twice, as this would have provoked a failure when
// running this test before the #653 fix.
chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV);
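// With the HTLC's CLTV expiry now reached, the monitor should have broadcast exactly one
// HTLC-Timeout transaction claiming the offered-HTLC output of the local commitment.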
let htlc_timeout = {
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
assert_eq!(node_txn[0].input.len(), 1);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
check_spends!(node_txn[0], local_txn[0]);

connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[0], our_payment_hash, false);

fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
// If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
// force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
// Carol, Alice would be the upstream node, and Carol the downstream.)

// Steps of the test:
// 1) Alice sends an HTLC to Carol through Bob.
// 2) Carol doesn't settle the HTLC.
// 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Otherwise, Bob
// force-closes.
// Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
// 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
// but can't be claimed, as Bob doesn't yet know the preimage.
// 5) Carol releases the preimage to Bob off-chain.
// 6) Bob claims the offered output on the broadcasted commitment.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

// Create some initial channels
let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);

// Steps (1) and (2):
// Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);

// Check that Alice's commitment transaction now contains an output for this HTLC.
let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
check_spends!(alice_txn[0], chan_ab.3);
assert_eq!(alice_txn[0].output.len(), 2);
check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(alice_txn.len(), 2);

// Steps (3) and (4):
// If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
// responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
let mut force_closing_node = 0; // Alice force-closes
let mut counterparty_node = 1; // Bob if Alice force-closes
if !broadcast_alice {
force_closing_node = 1;
counterparty_node = 0;
nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
if go_onchain_before_fulfill {
let txn_to_broadcast = match broadcast_alice {
true => alice_txn.clone(),
false => get_local_commitment_txn!(nodes[1], chan_ab.2)
connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);

// Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
// process of removing the HTLC from their commitment transactions.
nodes[2].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);

let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(carol_updates.update_add_htlcs.is_empty());
assert!(carol_updates.update_fail_htlcs.is_empty());
assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
assert!(carol_updates.update_fee.is_none());
assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
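// Step (5): Bob learns the preimage off-chain via Carol's update_fulfill_htlc.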
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
if !go_onchain_before_fulfill && broadcast_alice {
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
_ => panic!("Unexpected event"),
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
// One monitor update for the preimage to update the Bob<->Alice channel, and one monitor
// update for Carol<->Bob's updated commitment transaction info.
check_added_monitors!(nodes[1], 2);

let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
let bob_revocation = match events[0] {
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
assert_eq!(*node_id, nodes[2].node.get_our_node_id());
_ => panic!("Unexpected event"),
let bob_updates = match events[1] {
MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
assert_eq!(*node_id, nodes[2].node.get_our_node_id());
_ => panic!("Unexpected event"),

nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
check_added_monitors!(nodes[2], 1);
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
check_added_monitors!(nodes[2], 1);

let events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let carol_revocation = match events[0] {
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
_ => panic!("Unexpected event"),
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
check_added_monitors!(nodes[1], 1);

// If this test requires the force-closed channel to not be on-chain until after the fulfill,
// here's where we put said channel's commitment tx on-chain.
let mut txn_to_broadcast = alice_txn.clone();
if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
if !go_onchain_before_fulfill {
connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
// If Bob was the one to force-close, he will have already passed these checks earlier.
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
if broadcast_alice {
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
if nodes[1].connect_style.borrow().updates_best_block_first() {
assert_eq!(bob_txn.len(), 3);
assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
assert_eq!(bob_txn.len(), 2);
check_spends!(bob_txn[0], chan_ab.3);

// Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
// broadcasted commitment transaction.
let script_weight = match broadcast_alice {
true => OFFERED_HTLC_SCRIPT_WEIGHT,
false => ACCEPTED_HTLC_SCRIPT_WEIGHT
// If Alice force-closed, Bob only broadcasts an HTLC-output-claiming transaction. Otherwise,
// Bob force-closed and broadcasts the commitment transaction along with an
// HTLC-output-claiming transaction.
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
if broadcast_alice {
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
let htlc_tx = bob_txn.pop().unwrap();
check_spends!(htlc_tx, txn_to_broadcast[0]);
assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
fn test_onchain_htlc_settlement_after_close() {
do_test_onchain_htlc_settlement_after_close(true, true);
do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
do_test_onchain_htlc_settlement_after_close(true, false);
do_test_onchain_htlc_settlement_after_close(false, false);

fn test_duplicate_temporary_channel_id_from_different_peers() {
// Tests that we can accept two different `OpenChannel` requests with the same
// `temporary_channel_id`, as long as they are from different peers.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

// Create the first channel
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());

// Create a second channel
nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());

// Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
// `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;

// Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
// `temporary_channel_id`, as they come from different peers.
nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);

let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
MessageSendEvent::SendAcceptChannel { node_id, msg } => {
assert_eq!(node_id, &nodes[1].node.get_our_node_id());
assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
_ => panic!("Unexpected event"),

nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);

let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
MessageSendEvent::SendAcceptChannel { node_id, msg } => {
assert_eq!(node_id, &nodes[2].node.get_our_node_id());
assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
_ => panic!("Unexpected event"),

fn test_peer_funding_sidechannel() {
// Test that if a peer somehow learns which txid we'll use for our channel funding before we
// receive `funding_transaction_generated`, the peer cannot cause us to crash. We'd previously
// assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
// the txid and panicked if the peer tried to open a redundant channel to us with the same
// funding outpoint.

// While this assumption is generally safe, some users may have out-of-band protocols where
// they notify their LSP about a funding outpoint first, or this may be violated in the future
// with collaborative transaction construction protocols, i.e. dual-funding.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);

let (_, tx, funding_output) =
create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);

let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
assert_eq!(cs_funding_events.len(), 1);
match cs_funding_events[0] {
Event::FundingGenerationReady { .. } => {}
_ => panic!("Unexpected event {:?}", cs_funding_events),

nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
check_added_monitors!(nodes[0], 1);
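// nodes[0] now tries to fund its own channel with the same transaction. This must fail
// cleanly rather than panic, as the funding outpoint is already in use by the just-funded
// nodes[2] channel.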
let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
let err_msg = format!("{:?}", res.unwrap_err());
assert!(err_msg.contains("An existing channel using outpoint "));
assert!(err_msg.contains(" is open with peer"));
// Even though the last funding_transaction_generated errored, it still generated a
// SendFundingCreated. However, when the peer responds with a funding_signed it will send the
// appropriate error message.
let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);

let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
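// Having already failed the channel locally, nodes[0] answers the funding_signed with an
// error message of its own.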
get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());

fn test_duplicate_conflicting_funding_from_second_peer() {
// Test that if a user tries to fund a channel with a funding outpoint they'd previously used
// we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
// don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks
// implies the user (and our counterparty) has reused cryptographic keys across channels, which
// we require the user not to do.
let chanmon_cfgs = create_chanmon_cfgs(4);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);

let (_, tx, funding_output) =
create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);

// Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
// nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();

nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();

let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
// At this point, the channel should be closed, after having generated one monitor write (the
// watch_channel call which failed), but zero monitor updates.
check_added_monitors!(nodes[0], 1);
get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);

fn test_duplicate_funding_err_in_funding() {
// Test that if we have a live channel with one peer, then another peer comes along and tries
// to create a second channel with the same txid we'll fail and not overwrite the
// outpoint_to_peer map in `ChannelManager`.
// This was previously broken.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
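// Open a second channel nodes[2] <-> nodes[1], then rewrite its IDs so that it collides
// with the channel_id of the live nodes[0] <-> nodes[1] channel.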
nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);

// Now that we have a second channel with the same funding txo, send a bogus funding message
// and let nodes[1] remove the inbound channel.
let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);

nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();

let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
funding_created_msg.temporary_channel_id = real_channel_id;
// Make the signature invalid by changing the funding output
funding_created_msg.funding_output_index += 10;
nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
let err = "Invalid funding_created signature from peer".to_owned();
let reason = ClosureReason::ProcessingError { err };
let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
check_closed_events(&nodes[1], &[expected_closing]);
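// The outpoint_to_peer entry for the real channel must still point at nodes[0]; it must not
// have been overwritten by the failed attempt from nodes[2].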
*nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
nodes[0].node.get_our_node_id()

fn test_duplicate_chan_id() {
// Test that if a given peer tries to open a channel with the same channel_id as one that is
// already open we reject it and keep the old channel.
// Previously, full_stack_target managed to figure out that if you tried to open two channels
// with the same funding output (ie post-funding channel_id), we'd create a monitor update for
// the existing channel when we detect the duplicate new channel, screwing up our monitor
// updating logic for the existing channel.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

// Create an initial channel
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

// Try to create a second channel with the same temporary_channel_id as the first and check
// that it is rejected.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);

let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
// Technically, at this point, nodes[1] would be justified in thinking both the
// first (valid) and second (invalid) channels are closed, given they both have
// the same non-temporary channel_id. However, currently we do not, so we just
// move forward with it.
assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
assert_eq!(node_id, nodes[0].node.get_our_node_id());
_ => panic!("Unexpected event"),

// Move the first channel through the funding flow...
let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
check_added_monitors!(nodes[0], 0);

let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);

let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();

expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);

// Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
// temporary one).

// First try to open a second channel with a temporary channel id equal to the txid-based one.
// Technically this is allowed by the spec, but we don't support it and there's little reason
// to. Still, it shouldn't cause any other issues.
open_chan_msg.common_fields.temporary_channel_id = channel_id;
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);

let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
// Technically, at this point, nodes[1] would be justified in thinking both
// channels are closed, but currently we do not, so we just move forward with it.
assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
assert_eq!(node_id, nodes[0].node.get_our_node_id());
_ => panic!("Unexpected event"),

// Now try to create a second channel which has a duplicate funding output.
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event

let funding_created = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
// Once we call `get_funding_created` the channel has a duplicate channel_id with
// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
// try to create another channel. Instead, we drop the channel entirely here (leaving the
// ChannelManager in a possibly-nonsensical state).
match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
ChannelPhase::UnfundedOutboundV1(mut chan) => {
let logger = test_utils::TestLogger::new();
chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
_ => panic!("Unexpected ChannelPhase variant"),

check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
// At this point we'll look up if the channel_id is present and immediately fail the channel
// without trying to persist the `ChannelMonitor`.
check_added_monitors!(nodes[1], 0);

check_closed_events(&nodes[1], &[
ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
err: "Already had channel with the new channel_id".to_owned()

// ...still, nodes[1] will reject the duplicate channel.
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
// Technically, at this point, nodes[1] would be justified in thinking both
// channels are closed, but currently we do not, so we just move forward with it.
assert_eq!(msg.channel_id, channel_id);
assert_eq!(node_id, nodes[0].node.get_our_node_id());
_ => panic!("Unexpected event"),

// Finally, finish creating the original channel and send a payment over it to make sure
// everything is functional.
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);

let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();

expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());

let events_4 = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events_4.len(), 0);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);

let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);

send_payment(&nodes[0], &[&nodes[1]], 8000000);
fn test_error_chans_closed() {
// Test that we properly handle error messages, closing appropriate channels.
// Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
// peer. The "real" fix for that is to index channels by peer id; however, in the meantime
// we can test various edge cases around it to ensure we don't regress.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

// Create some initial channels
let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);

assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
assert_eq!(nodes[2].node.list_usable_channels().len(), 1);

// Closing a channel from a different peer has no effect
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
assert_eq!(nodes[0].node.list_usable_channels().len(), 3);

// Closing one channel doesn't impact others
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], false);
check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
[nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);

// A null channel ID should close all channels with that peer
let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 2);
check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
[nodes[1].node.get_our_node_id(); 2], 100000);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
assert_eq!(msg.contents.flags & 2, 2);
_ => panic!("Unexpected event"),
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
assert_eq!(msg.contents.flags & 2, 2);
_ => panic!("Unexpected event"),

// Note that at this point users of a standard PeerHandler will end up calling
// peer_disconnected.
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
fn test_invalid_funding_tx() {
// Test that we properly handle invalid funding transactions sent to us from a peer.
// Previously, all other major lightning implementations had failed to properly sanitize
// funding transactions from their counterparties, leading to a multi-implementation critical
// security vulnerability (though we always sanitized properly, we've previously had
// un-released crashes in the sanitization process).

// Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
// previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
// gave up on it. We test this here by generating such a transaction.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);

// Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
// 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
// a panic as we'd try to extract a 32 byte preimage from a witness element without checking
// its length.
let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
let wit_program_script: ScriptBuf = wit_program.into();
for output in tx.output.iter_mut() {
// Make the confirmed funding transaction have a bogus script_pubkey
output.script_pubkey = ScriptBuf::new_v0_p2wsh(&wit_program_script.wscript_hash());

nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
check_added_monitors!(nodes[0], 1);
expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());

let events_1 = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events_1.len(), 0);

assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();

let expected_err = "funding tx had wrong script/value or output index";
confirm_transaction_at(&nodes[1], &tx, 1);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
[nodes[0].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
if let msgs::ErrorAction::DisconnectPeer { msg } = action {
assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
} else { panic!(); }
} else { panic!(); }
assert_eq!(nodes[1].node.list_channels().len(), 0);

// Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
// long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
// as it's not 32 bytes long.
let mut spend_tx = Transaction {
version: 2i32, lock_time: LockTime::ZERO,
input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
previous_output: BitcoinOutPoint {
script_sig: ScriptBuf::new(),
sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
output: vec![TxOut {
script_pubkey: ScriptBuf::new(),
check_spends!(spend_tx, tx);
mine_transaction(&nodes[1], &spend_tx);
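// Getting here without a panic is the test's real assertion: the monitor must tolerate the
// bogus five-element witness on a channel it already abandoned as invalid.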
fn test_coinbase_funding_tx() {
// Miners are able to fund channels directly from coinbase transactions. However, by
// consensus rules, outputs of a coinbase transaction are encumbered by a 100-block
// maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
// on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.

// Note that 0conf channels with coinbase funding transactions are unaffected and are
// immediately operational after opening.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

// Create the coinbase funding transaction.
let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
check_added_monitors!(nodes[0], 0);
let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);

expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

// Starting at height 0, we "confirm" the coinbase at height 1.
confirm_transaction_at(&nodes[0], &tx, 1);
// We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
// Check that we have no pending message events (we have not queued a `channel_ready` yet).
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// Now connect one more block which results in 100 confirmations of the coinbase transaction.
connect_blocks(&nodes[0], 1);
// There should now be a `channel_ready` which can be handled.
let _ = &nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
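// Repeat the maturity dance from nodes[1]'s perspective so both ends consider the coinbase
// funding mature and the channel becomes fully ready.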
confirm_transaction_at(&nodes[1], &tx, 1);
connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
connect_blocks(&nodes[1], 1);
expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);

fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
// In the first version of the chain::Confirm interface, after a refactor was made to not
// broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
// transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
// `best_block_updated` is at height N, and a transaction output which we wish to spend at
// height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
// spending transaction until height N+1 (or greater). This was due to the way
// `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
// spending transaction at the height the input transaction was confirmed at, not whether we
// should broadcast a spending transaction at the current height.
// A second, similar, issue involved failing HTLCs backwards - because we only provided the
// height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
// aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
// until we learned about an additional block.

// As an additional check, if `test_height_before_timelock` is set, we instead test that we
// aren't broadcasting transactions too early (ie not broadcasting them at all).
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
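// Use a connect style which skips intermediate blocks, so transactions can be reported as
// confirmed at a height below the current best block - the scenario described above.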
*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;

create_announced_chan_between_nodes(&nodes, 0, 1);
let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);

let conf_height = nodes[1].best_block_info().1;
if !test_height_before_timelock {
connect_blocks(&nodes[1], 24 * 6);
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
if test_height_before_timelock {
// If we confirmed the close transaction, but timelocks have not yet expired, we should not
// generate any events or broadcast any transactions
assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
// We should broadcast an HTLC transaction spending our funding transaction first
let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(spending_txn.len(), 2);
let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
check_spends!(htlc_tx, node_txn[0]);
// We should also generate a SpendableOutputs event with the to_self output (as its
// timelock has expired).
let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
assert_eq!(descriptor_spend_txn.len(), 1);

// If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);

let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);

fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);

fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);

let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
.with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
let route = get_route!(nodes[0], payment_params, 10_000).unwrap();

let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);

nodes[0].node.send_payment_with_route(&route, our_payment_hash,
RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let mut payment_event = SendEvent::from_event(events.pop().unwrap());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);

// Note that we use a different PaymentId here to allow us to duplicatively pay
nodes[0].node.send_payment_with_route(&route, our_payment_hash,
RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let mut payment_event = SendEvent::from_event(events.pop().unwrap());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
// At this point, nodes[1] would notice it has too much value for the payment. It will
// assume the second is a privacy attack (no longer particularly relevant
// post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
// the first HTLC delivered above.

expect_pending_htlcs_forwardable_ignore!(nodes[1]);
nodes[1].node.process_pending_htlc_forwards();

if test_for_second_fail_panic {
// Now we go fail back the first HTLC from the user end.
nodes[1].node.fail_htlc_backwards(&our_payment_hash);

let expected_destinations = vec![
HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
nodes[1].node.process_pending_htlc_forwards();

check_added_monitors!(nodes[1], 1);
let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);

nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);

let failure_events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(failure_events.len(), 4);
if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
// Let the second HTLC fail and claim the first
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();

check_added_monitors!(nodes[1], 1);
let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);

expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());

claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);

fn test_dup_htlc_second_fail_panic() {
// Previously, if we received two HTLCs back-to-back, where the second overran the expected
// value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
// Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
// HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
do_test_dup_htlc_second_rejected(true);

fn test_dup_htlc_second_rejected() {
// Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
// simply reject the second HTLC but are still able to claim the first HTLC.
do_test_dup_htlc_second_rejected(false);
fn test_inconsistent_mpp_params() {
// Test that if we receive two HTLCs with different payment parameters we fail back the first
// such HTLC and allow the second to stay.
let chanmon_cfgs = create_chanmon_cfgs(4);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);

let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
.with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
assert_eq!(route.paths.len(), 2);
route.paths.sort_by(|path_a, _| {
// Sort the paths so that the path through nodes[1] comes first
if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }

let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);

let cur_height = nodes[0].best_block_info().1;
let payment_id = PaymentId([42; 32]);

let session_privs = {
// We create a fake route here so that we start with three pending HTLCs, which we'll
// ultimately have, just not right away.
let mut dup_route = route.clone();
dup_route.paths.push(route.paths[1].clone());
nodes[0].node.test_add_new_pending_payment(our_payment_hash,
RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
&None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);

let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9760 assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9762 nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9763 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9764 check_added_monitors!(nodes[0], 1);
9767 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9768 assert_eq!(events.len(), 1);
9769 let payment_event = SendEvent::from_event(events.pop().unwrap());
9771 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9772 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9774 expect_pending_htlcs_forwardable!(nodes[2]);
9775 check_added_monitors!(nodes[2], 1);
9777 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9778 assert_eq!(events.len(), 1);
9779 let payment_event = SendEvent::from_event(events.pop().unwrap());
9781 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9782 check_added_monitors!(nodes[3], 0);
9783 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9785 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9786 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9787 // post-payment_secrets) and fail back the new HTLC.
9789 expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9790 nodes[3].node.process_pending_htlc_forwards();
9791 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9792 nodes[3].node.process_pending_htlc_forwards();
9794 check_added_monitors!(nodes[3], 1);
9796 let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9797 nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9798 commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9800 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9801 check_added_monitors!(nodes[2], 1);
9803 let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9804 nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9805 commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9807 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9809 nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9810 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9811 &None, session_privs[2]).unwrap();
9812 check_added_monitors!(nodes[0], 1);
9814 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9815 assert_eq!(events.len(), 1);
9816 pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9818 do_claim_payment_along_route(
9819 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage)
9821 expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
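// Recipient-side sketch (hypothetical helper): every part of one MPP payment must
// claim the same `total_msat`; a part that disagrees with the parts already held is
// failed back, exactly as nodes[3] does above with the 14M-total HTLC.
fn sketch_mpp_part_acceptable(held_total_msat: Option<u64>, new_part_total_msat: u64) -> bool {
	// Accept the first part unconditionally; later parts must match its total.
	held_total_msat.map_or(true, |held| held == new_part_total_msat)
}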
9825 fn test_double_partial_claim() {
9826 // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9827 // time out, the sender resends only some of the MPP parts, then the user processes the
9828 // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment.
9830 let chanmon_cfgs = create_chanmon_cfgs(4);
9831 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9832 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9833 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9835 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9836 create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9837 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9838 create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9840 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9841 assert_eq!(route.paths.len(), 2);
9842 route.paths.sort_by(|path_a, _| {
9843 // Sort the paths so that the path through nodes[1] comes first
9844 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9845 core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9848 send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9849 // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9850 // amount of time to respond to.
9852 // Connect some blocks to time out the payment
9853 connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9854 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9856 let failed_destinations = vec![
9857 HTLCDestination::FailedPayment { payment_hash },
9858 HTLCDestination::FailedPayment { payment_hash },
9860 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9862 pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
9864 // nodes[0] now retries one of the two paths...
9865 nodes[0].node.send_payment_with_route(&route, payment_hash,
9866 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9867 check_added_monitors!(nodes[0], 2);
9869 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9870 assert_eq!(events.len(), 2);
9871 let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9872 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9874 // At this point nodes[3] has received one half of the payment, and the user goes to handle
9875 // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9876 nodes[3].node.claim_funds(payment_preimage);
9877 check_added_monitors!(nodes[3], 0);
9878 assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
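// Claim-gating sketch (hypothetical helper): a payment may only be claimed while the
// HTLCs currently held cover the full amount; after one part times out, claiming with
// the remaining part alone must be refused, as asserted above.
fn sketch_can_claim_payment(held_parts_msat: &[u64], total_msat: u64) -> bool {
	held_parts_msat.iter().sum::<u64>() >= total_msat
}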
9881 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9882 #[derive(Clone, Copy, PartialEq)]
9883 enum ExposureEvent {
9884 /// Breach occurs at HTLC forwarding (see `send_htlc`)
9885 AtHTLCForward,
9886 /// Breach occurs at HTLC reception (see `update_add_htlc`)
9887 AtHTLCReception,
9888 /// Breach occurs at outbound update_fee (see `send_update_fee`)
9889 AtUpdateFeeOutbound,
9890 }
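// Configuration sketch (hypothetical helper; values are arbitrary): the two
// `MaxDustHTLCExposure` variants the test below parameterizes over. The fixed variant
// caps dust exposure at an absolute msat value; the multiplier variant scales the cap
// with the current feerate.
fn sketch_dust_exposure_configs() -> (UserConfig, UserConfig) {
	let mut fixed = test_default_channel_config();
	fixed.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000);
	let mut scaled = test_default_channel_config();
	scaled.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(10_000);
	(fixed, scaled)
}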
9892 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
9893 // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
9896 // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
9897 // trimmed-to-dust HTLC outbound balance and this new payment as included on next
9898 // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
9899 // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
9900 // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
9901 // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
9902 // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel
9903 // might be available again for HTLC processing once the dust bandwidth has cleared up.
9905 let chanmon_cfgs = create_chanmon_cfgs(2);
9906 let mut config = test_default_channel_config();
9908 // We hard-code the feerate values here, but they're re-calculated further down and asserted.
9909 // If the values below ever change, these constants should simply be updated.
9910 const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
9911 let nondust_htlc_count_in_limit =
9912 if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9913 AT_FEE_OUTBOUND_HTLCS
9914 } else { 0 };
9915 let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
9916 let expected_dust_buffer_feerate = initial_feerate + 2530;
9917 let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
9918 commitment_tx_cost +=
9919 if on_holder_tx {
9920 htlc_success_tx_weight(&ChannelTypeFeatures::empty())
9921 } else {
9922 htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
9923 } * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
9925 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9926 *feerate_lock = initial_feerate;
9928 config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9929 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9930 // to get roughly the same initial value as the default setting when this test was
9931 // originally written.
9932 MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
9933 } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
9934 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9935 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9936 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9938 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
9939 let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9940 open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
9941 open_channel.common_fields.max_accepted_htlcs = 60;
9943 open_channel.common_fields.dust_limit_satoshis = 546;
9945 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9946 let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9947 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9949 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9951 let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9954 let mut node_0_per_peer_lock;
9955 let mut node_0_peer_state_lock;
9956 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
9957 ChannelPhase::UnfundedOutboundV1(chan) => {
9958 chan.context.holder_dust_limit_satoshis = 546;
9960 _ => panic!("Unexpected ChannelPhase variant"),
9964 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9965 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9966 check_added_monitors!(nodes[1], 1);
9967 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9969 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9970 check_added_monitors!(nodes[0], 1);
9971 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9973 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9974 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9975 update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9978 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9979 *feerate_lock = 253;
9982 // Fetch a route in advance as we will be unable to once we're unable to send.
9983 let (mut route, payment_hash, _, payment_secret) =
9984 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
9986 let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
9987 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9988 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9989 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
9990 (chan.context().get_dust_buffer_feerate(None) as u64,
9991 chan.context().get_max_dust_htlc_exposure_msat(253))
9993 assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
9994 let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
9995 let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
9997 // Subtract 3 sats for the multiplier and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
9998 // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
9999 // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
10000 let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10001 let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
10003 // This test was written with a fixed dust value here, which we retain, but assert that it is,
10004 // indeed, dust on both transactions.
10005 let dust_htlc_on_counterparty_tx: u64 = 4;
10006 let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
10007 let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10008 assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
10009 assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);
10011 if on_holder_tx {
10012 if dust_outbound_balance {
10013 // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10014 // Outbound dust balance: 4372 sats
10015 // Note, we need the sent payment to be above the outbound dust threshold on the counterparty_tx of 2132 sats
10016 for _ in 0..dust_outbound_htlc_on_holder_tx {
10017 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
10018 nodes[0].node.send_payment_with_route(&route, payment_hash,
10019 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10021 } else {
10022 // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10023 // Inbound dust balance: 4372 sats
10024 // Note, we need the sent payment to be above the outbound dust threshold on the counterparty_tx of 2031 sats
10025 for _ in 0..dust_inbound_htlc_on_holder_tx {
10026 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
10029 } else {
10030 if dust_outbound_balance {
10031 // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10032 // Outbound dust balance: 5000 sats
10033 for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10034 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
10035 nodes[0].node.send_payment_with_route(&route, payment_hash,
10036 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10038 } else {
10039 // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10040 // Inbound dust balance: 5000 sats
10041 for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10042 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
10047 if exposure_breach_event == ExposureEvent::AtHTLCForward {
10048 route.paths[0].hops.last_mut().unwrap().fee_msat =
10049 if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
10050 // With default dust exposure: 5000 sats
10052 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10053 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10054 ), true, APIError::ChannelUnavailable { .. }, {});
10060 } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
10061 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
10062 nodes[1].node.send_payment_with_route(&route, payment_hash,
10063 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10064 check_added_monitors!(nodes[1], 1);
10065 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
10066 assert_eq!(events.len(), 1);
10067 let payment_event = SendEvent::from_event(events.remove(0));
10068 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
10069 // With default dust exposure: 5000 sats
10070 if on_holder_tx {
10071 // Outbound dust balance: 6399 sats
10072 let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
10073 let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
10074 nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
10075 } else {
10076 // Outbound dust balance: 5200 sats
10077 nodes[0].logger.assert_log("lightning::ln::channel",
10078 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
10079 dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
10080 max_dust_htlc_exposure_msat), 1);
10081 }
10082 } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
10083 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
10084 // For the multiplier dust exposure limit, since it scales with feerate,
10085 // we need to add a lot of HTLCs that will become dust at the new feerate
10086 // to cross the threshold.
10087 for _ in 0..AT_FEE_OUTBOUND_HTLCS {
10088 let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
10089 nodes[0].node.send_payment_with_route(&route, payment_hash,
10090 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10093 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10094 *feerate_lock = *feerate_lock * 10;
10096 nodes[0].node.timer_tick_occurred();
10097 check_added_monitors!(nodes[0], 1);
10098 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
10101 let _ = nodes[0].node.get_and_clear_pending_msg_events();
10102 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
10103 added_monitors.clear();
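// Back-of-the-envelope sketch (hypothetical helper; constants mirror the default case
// of this test: a 2783 sat/kW dust buffer feerate, i.e. 253 + 2530, and a 546 sat dust
// limit): how the per-HTLC dust thresholds used above fall out of the second-stage
// transaction weights.
fn sketch_dust_thresholds_sat() -> (u64, u64) {
	let features = ChannelTypeFeatures::only_static_remote_key();
	let dust_buffer_feerate: u64 = 253 + 2530;
	let dust_limit_sat: u64 = 546;
	// Outbound HTLCs are claimed via HTLC-timeout transactions...
	let outbound = dust_buffer_feerate * htlc_timeout_tx_weight(&features) / 1000 + dust_limit_sat;
	// ...while inbound HTLCs use the (heavier) HTLC-success transaction.
	let inbound = dust_buffer_feerate * htlc_success_tx_weight(&features) / 1000 + dust_limit_sat;
	(outbound, inbound)
}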
10106 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
10107 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10108 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10109 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10110 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10111 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10112 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10113 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10114 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10115 if !multiplier_dust_limit && !apply_excess_fee {
10116 // Because non-dust HTLC transaction fees are included in the dust exposure, trying to
10117 // increase the fee to hit a higher dust exposure with a
10118 // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these
10119 // in the `multiplier_dust_limit` case.
10120 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10121 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10122 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10123 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10128 fn test_max_dust_htlc_exposure() {
10129 do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
10130 do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
10131 do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
10132 do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
10136 fn test_nondust_htlc_fees_are_dust() {
10137 // Test that the transaction fees paid in nondust HTLCs count towards our dust limit
10138 let chanmon_cfgs = create_chanmon_cfgs(3);
10139 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10141 let mut config = test_default_channel_config();
10142 // Set the dust limit to the default value
10143 config.channel_config.max_dust_htlc_exposure =
10144 MaxDustHTLCExposure::FeeRateMultiplier(10_000);
10145 // Make sure the HTLC limits don't get in the way
10146 config.channel_handshake_limits.min_max_accepted_htlcs = 400;
10147 config.channel_handshake_config.our_max_accepted_htlcs = 400;
10148 config.channel_handshake_config.our_htlc_minimum_msat = 1;
10150 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
10151 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10153 // Create a channel from 1 -> 0 but immediately push all of the funds towards 0
10154 let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
10155 while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
10156 send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
10159 // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
10160 // repeatedly until we run out of space.
10161 const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
10162 let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
10164 while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
10165 route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
10167 assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
10168 "We don't want to run out of ability to send because of some non-dust limit");
10169 assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
10170 "We should be able to fill our dust limit without too many HTLCs");
10172 let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
10173 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10174 assert_eq!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
10175 "Make sure we are able to send once we clear one HTLC");
10177 // At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
10178 // exposure limit, and we want to max that out using non-dust HTLCs.
10179 let commitment_tx_per_htlc_cost =
10180 htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
10181 let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
10182 assert!(max_htlcs_remaining < 30,
10183 "We should be able to fill our dust limit without too many HTLCs");
10184 for i in 0..max_htlcs_remaining + 1 {
10185 assert_ne!(i, max_htlcs_remaining);
10186 if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
10187 // We found our limit, and it was less than max_htlcs_remaining!
10188 // At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
10189 // remaining dust exposure.
10190 break;
10191 }
10192 route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
10195 // At this point non-dust HTLCs are no longer accepted from node 0 -> 1; we also check that
10196 // such HTLCs can't be routed over the same channel.
10197 create_announced_chan_between_nodes(&nodes, 2, 0);
10198 let (route, payment_hash, _, payment_secret) =
10199 get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
10200 let onion = RecipientOnionFields::secret_only(payment_secret);
10201 nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
10202 check_added_monitors(&nodes[2], 1);
10203 let send = SendEvent::from_node(&nodes[2]);
10205 nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
10206 commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
10208 expect_pending_htlcs_forwardable!(nodes[0]);
10209 check_added_monitors(&nodes[0], 1);
10210 let node_id_1 = nodes[1].node.get_our_node_id();
10211 expect_htlc_handling_failed_destinations!(
10212 nodes[0].node.get_and_clear_pending_events(),
10213 &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
10216 let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
10217 nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
10218 commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
10219 expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
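// Fee-cost sketch (hypothetical helper, assuming the test-default 253 sat/kW feerate):
// the marginal msat cost a single nondust received HTLC adds via its HTLC-success
// transaction, matching `commitment_tx_per_htlc_cost` above. This fee weight is what
// counts against the dust exposure limit even though the HTLC itself is not dust.
fn sketch_nondust_htlc_fee_cost_msat() -> u64 {
	htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253
}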
10224 fn test_non_final_funding_tx() {
10225 let chanmon_cfgs = create_chanmon_cfgs(2);
10226 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10227 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10228 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10230 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10231 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10232 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10233 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10234 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10236 let best_height = nodes[0].node.best_block.read().unwrap().height;
10238 let chan_id = *nodes[0].network_chan_count.borrow();
10239 let events = nodes[0].node.get_and_clear_pending_events();
10240 let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) };
10241 assert_eq!(events.len(), 1);
10242 let mut tx = match events[0] {
10243 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10244 // Timelock the transaction _beyond_ the best client height + 1.
10245 Transaction { version: chan_id as i32, lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
10246 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10249 _ => panic!("Unexpected event"),
10251 // Transaction should fail as it's evaluated as non-final for propagation.
10252 match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
10253 Err(APIError::APIMisuseError { err }) => {
10254 assert_eq!(err, "Funding transaction absolute timelock is non-final");
10258 let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
10259 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
10260 assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
10264 fn test_non_final_funding_tx_within_headroom() {
10265 let chanmon_cfgs = create_chanmon_cfgs(2);
10266 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10267 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10268 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10270 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10271 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10272 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10273 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10274 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10276 let best_height = nodes[0].node.best_block.read().unwrap().height;
10278 let chan_id = *nodes[0].network_chan_count.borrow();
10279 let events = nodes[0].node.get_and_clear_pending_events();
10280 let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) };
10281 assert_eq!(events.len(), 1);
10282 let mut tx = match events[0] {
10283 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10284 // Timelock the transaction within a +1 headroom from the best block.
10285 Transaction { version: chan_id as i32, lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
10286 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10289 _ => panic!("Unexpected event"),
10292 // Transaction should be accepted if it's within a +1 headroom of the best block.
10293 assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
10294 get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
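// Finality sketch (illustrative only, not an LDK API): the +1 headroom the two tests
// above exercise. A height-locked funding transaction is accepted as long as its lock
// height does not exceed the best block height plus one; anything later is rejected
// as non-final.
fn sketch_funding_locktime_acceptable(best_height: u32, lock_height: u32) -> bool {
	lock_height <= best_height + 1
}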
10298 fn accept_busted_but_better_fee() {
10299 // If a peer sends us a fee update that is too low, but higher than our previous channel
10300 // feerate, we should accept it. In the future we may want to close the channel in that
10301 // case, but for now we simply accept the update.
10302 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10303 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10304 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10305 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10307 create_chan_between_nodes(&nodes[0], &nodes[1]);
10309 // Set nodes[1] to expect 5,000 sat/kW.
10311 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
10312 *feerate_lock = 5000;
10315 // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
10317 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10318 *feerate_lock = 1000;
10320 nodes[0].node.timer_tick_occurred();
10321 check_added_monitors!(nodes[0], 1);
10323 let events = nodes[0].node.get_and_clear_pending_msg_events();
10324 assert_eq!(events.len(), 1);
10325 match events[0] {
10326 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10327 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10328 commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10330 _ => panic!("Unexpected event"),
10333 // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept it.
10336 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10337 *feerate_lock = 2000;
10339 nodes[0].node.timer_tick_occurred();
10340 check_added_monitors!(nodes[0], 1);
10342 let events = nodes[0].node.get_and_clear_pending_msg_events();
10343 assert_eq!(events.len(), 1);
10344 match events[0] {
10345 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10346 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10347 commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10349 _ => panic!("Unexpected event"),
10352 // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the channel.
10355 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10356 *feerate_lock = 1000;
10358 nodes[0].node.timer_tick_occurred();
10359 check_added_monitors!(nodes[0], 1);
10361 let events = nodes[0].node.get_and_clear_pending_msg_events();
10362 assert_eq!(events.len(), 1);
10363 match events[0] {
10364 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
10365 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10366 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
10367 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
10368 [nodes[0].node.get_our_node_id()], 100000);
10369 check_closed_broadcast!(nodes[1], true);
10370 check_added_monitors!(nodes[1], 1);
10372 _ => panic!("Unexpected event"),
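// Policy sketch (illustrative only): the acceptance rule the test above checks. An
// update_fee below our estimator's lower limit is still tolerated as long as it moves
// the channel feerate upward; a decrease while below the limit closes the channel.
fn sketch_accept_update_fee(current_feerate: u32, proposed: u32, our_lower_limit: u32) -> bool {
	proposed >= our_lower_limit || proposed > current_feerate
}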
10376 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
10377 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10378 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10379 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10380 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10381 let min_final_cltv_expiry_delta = 120;
10382 let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
10383 min_final_cltv_expiry_delta - 2 };
10384 let recv_value = 100_000;
10386 create_chan_between_nodes(&nodes[0], &nodes[1]);
10388 let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
10389 let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
10390 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
10391 Some(recv_value), Some(min_final_cltv_expiry_delta));
10392 (payment_hash, payment_preimage, payment_secret)
10394 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
10395 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
10397 let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
10398 nodes[0].node.send_payment_with_route(&route, payment_hash,
10399 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10400 check_added_monitors!(nodes[0], 1);
10401 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
10402 assert_eq!(events.len(), 1);
10403 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
10404 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
10405 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
10406 expect_pending_htlcs_forwardable!(nodes[1]);
10407 if valid_delta {
10409 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
10410 None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
10412 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10413 } else {
10414 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
10416 check_added_monitors!(nodes[1], 1);
10418 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
10419 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
10420 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
10422 expect_payment_failed!(nodes[0], payment_hash, true);
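// Validity sketch (hypothetical helper): the recipient-side check this test
// parameterizes over. An HTLC whose final CLTV headroom is below the invoice's
// `min_final_cltv_expiry_delta` is failed back; one with enough headroom is claimable.
fn sketch_final_cltv_acceptable(route_final_delta: u16, invoice_min_delta: u16) -> bool {
	route_final_delta >= invoice_min_delta
}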
10427 fn test_payment_with_custom_min_cltv_expiry_delta() {
10428 do_payment_with_custom_min_final_cltv_expiry(false, false);
10429 do_payment_with_custom_min_final_cltv_expiry(false, true);
10430 do_payment_with_custom_min_final_cltv_expiry(true, false);
10431 do_payment_with_custom_min_final_cltv_expiry(true, true);
10435 fn test_disconnects_peer_awaiting_response_ticks() {
10436 // Tests that nodes which are awaiting a response critical for channel responsiveness
10437 // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10438 let mut chanmon_cfgs = create_chanmon_cfgs(2);
10439 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10440 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10441 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10443 // Asserts a disconnect event is queued to the user.
10444 let check_disconnect_event = |node: &Node, should_disconnect: bool| {
10445 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
10446 if let MessageSendEvent::HandleError { action, .. } = event {
10447 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
10448 Some(())
10449 } else {
10450 None
10451 }
10452 } else {
10453 None
10454 }
10455 );
10456 assert_eq!(disconnect_event.is_some(), should_disconnect);
10459 // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
10460 // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10461 let check_disconnect = |node: &Node| {
10462 // No disconnect without any timer ticks.
10463 check_disconnect_event(node, false);
10465 // No disconnect with 1 timer tick less than required.
10466 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
10467 node.node.timer_tick_occurred();
10468 check_disconnect_event(node, false);
10471 // Disconnect after reaching the required ticks.
10472 node.node.timer_tick_occurred();
10473 check_disconnect_event(node, true);
10475 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
10476 node.node.timer_tick_occurred();
10477 check_disconnect_event(node, true);
10480 create_chan_between_nodes(&nodes[0], &nodes[1]);
10482 // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
10483 *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
10484 nodes[0].node.timer_tick_occurred();
10485 check_added_monitors!(&nodes[0], 1);
10486 let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10487 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
10488 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
10489 check_added_monitors!(&nodes[1], 1);
10491 // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
10492 let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
10493 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
10494 check_added_monitors!(&nodes[0], 1);
10495 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
10496 check_added_monitors(&nodes[0], 1);
10498 // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
10499 // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
10500 // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10501 let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10502 check_disconnect(&nodes[1]);
10504 // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
10506 // Note that since the commitment dance didn't complete above, Alice is expected to resend her
10507 // final `RevokeAndACK` to Bob to complete it.
10508 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10509 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10510 let bob_init = msgs::Init {
10511 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10513 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
10514 let alice_init = msgs::Init {
10515 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10517 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
10519 // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
10520 // received Bob's yet, so she should disconnect him after reaching
10521 // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10522 let alice_channel_reestablish = get_event_msg!(
10523 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
10525 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
10526 check_disconnect(&nodes[0]);
10528 // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
10529 let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
10530 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
10531 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
10532 Some(msg.clone())
10533 } else {
10534 None
10535 }
10536 ).unwrap();
10537 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
10539 // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
10540 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10541 nodes[0].node.timer_tick_occurred();
10542 check_disconnect_event(&nodes[0], false);
10545 // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
10546 // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10547 check_disconnect(&nodes[1]);
10549 // Finally, have Bob process the last message.
10550 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
10551 check_added_monitors(&nodes[1], 1);
10553 // At this point, neither node should attempt to disconnect each other, since they aren't
10554 // waiting on any messages.
10555 for node in &nodes {
10556 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10557 node.node.timer_tick_occurred();
10558 check_disconnect_event(node, false);
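// Tick-counting sketch (hypothetical helper): a peer owing a response critical to
// channel responsiveness is only disconnected once
// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS` timer ticks elapse without it, exactly as
// the closures above assert.
fn sketch_should_disconnect_peer(ticks_awaiting_response: usize) -> bool {
	ticks_awaiting_response >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
}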
10564 fn test_remove_expired_outbound_unfunded_channels() {
10565 let chanmon_cfgs = create_chanmon_cfgs(2);
10566 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10567 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10568 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10570 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10571 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10572 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10573 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10574 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10576 let events = nodes[0].node.get_and_clear_pending_events();
10577 assert_eq!(events.len(), 1);
10578 match events[0] {
10579 Event::FundingGenerationReady { .. } => (),
10580 _ => panic!("Unexpected event"),
10583 // Asserts the outbound channel has been removed from nodes[0]'s peer state map.
10584 let check_outbound_channel_existence = |should_exist: bool| {
10585 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10586 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10587 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10590 // Channel should exist without any timer ticks.
10591 check_outbound_channel_existence(true);
10593 // Channel should exist with 1 timer tick less than required.
10594 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10595 nodes[0].node.timer_tick_occurred();
10596 check_outbound_channel_existence(true)
10599 // Remove channel after reaching the required ticks.
10600 nodes[0].node.timer_tick_occurred();
10601 check_outbound_channel_existence(false);
10603 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10604 assert_eq!(msg_events.len(), 1);
10605 match msg_events[0] {
10606 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10607 assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10609 _ => panic!("Unexpected event"),
10611 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
10615 fn test_remove_expired_inbound_unfunded_channels() {
10616 let chanmon_cfgs = create_chanmon_cfgs(2);
10617 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10618 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10619 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10621 let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10622 let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10623 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10624 let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10625 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10627 let events = nodes[0].node.get_and_clear_pending_events();
10628 assert_eq!(events.len(), 1);
10629 match events[0] {
10630 Event::FundingGenerationReady { .. } => (),
10631 _ => panic!("Unexpected event"),
10634 // Asserts the inbound channel has been removed from nodes[1]'s peer state map.
10635 let check_inbound_channel_existence = |should_exist: bool| {
10636 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10637 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10638 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10641 // Channel should exist without any timer ticks.
10642 check_inbound_channel_existence(true);
10644 // Channel should exist with 1 timer tick less than required.
10645 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10646 nodes[1].node.timer_tick_occurred();
10647 check_inbound_channel_existence(true)
10650 // Remove channel after reaching the required ticks.
10651 nodes[1].node.timer_tick_occurred();
10652 check_inbound_channel_existence(false);
10654 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
10655 assert_eq!(msg_events.len(), 1);
10656 match msg_events[0] {
10657 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10658 assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10660 _ => panic!("Unexpected event"),
10662 check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
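// Garbage-collection sketch (hypothetical helper): both the outbound and inbound
// tests above rely on the same rule. An unfunded channel is force-closed once it has
// survived `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` timer ticks without completing the
// funding handshake.
fn sketch_unfunded_channel_expired(ticks_since_open: usize) -> bool {
	ticks_since_open >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
}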
10666 fn test_channel_close_when_not_timely_accepted() {
10667 // Create network of two nodes
10668 let chanmon_cfgs = create_chanmon_cfgs(2);
10669 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10670 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10671 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10673 // Simulate a peer disconnect mid-handshake:
10674 // the channel is initiated from the node 0 side,
10675 // but the nodes disconnect before node 1 can send its accept_channel.
10676 let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10677 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10678 assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10680 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10681 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10683 // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
10684 assert_eq!(nodes[0].node.list_channels().len(), 1);
10686 // Since the channel was inbound from nodes[1]'s perspective, it should have been dropped immediately.
10687 assert_eq!(nodes[1].node.list_channels().len(), 0);
10689 // In the meantime, some time passes.
10690 for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
10691 nodes[0].node.timer_tick_occurred();
10694 // Since we disconnected from the peer and did not reconnect within the allowed time,
10695 // we should have force-closed the channel by now.
10696 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
10697 assert_eq!(nodes[0].node.list_channels().len(), 0);
10700 // Since the accept_channel message was never received,
10701 // the channel should have been force-closed by now from node 0's side
10702 // and the peer removed from per_peer_state.
10703 let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10704 assert_eq!(node_0_per_peer_state.len(), 0);
10709 fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
10710 // Create network of two nodes
10711 let chanmon_cfgs = create_chanmon_cfgs(2);
10712 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10713 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10714 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10716 // Simulate a peer disconnect mid-handshake:
10717 // the channel is initiated from the node 0 side,
10718 // but the nodes disconnect before node 1 can send its accept_channel.
10719 let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10720 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10721 assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10723 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10724 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10726 // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
10727 assert_eq!(nodes[0].node.list_channels().len(), 1);
10729 // Since the channel was inbound from nodes[1]'s perspective, it should have been immediately dropped.
10730 assert_eq!(nodes[1].node.list_channels().len(), 0);
10732 // The peers now reconnect
10733 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
10734 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10735 }, true).unwrap();
10736 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
10737 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10738 }, false).unwrap();
10740 // Make sure the SendOpenChannel message is added to nodes[0]'s pending message events.
10741 let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10742 assert_eq!(msg_events.len(), 1);
10743 match &msg_events[0] {
10744 MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
10745 _ => panic!("Unexpected message."),
10749 fn do_test_multi_post_event_actions(do_reload: bool) {
10750 // Tests handling multiple post-Event actions at once.
10751 // There is specific code in ChannelManager to handle channels where multiple post-Event
10752 // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
10754 // Specifically, we test calling `get_and_clear_pending_events` while there are two
10755 // PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
10756 // - one from an RAA and one from an inbound commitment_signed.
10757 let chanmon_cfgs = create_chanmon_cfgs(3);
10758 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10759 let (persister, chain_monitor);
10760 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10761 let nodes_0_deserialized;
10762 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10764 let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
10765 let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
10767 send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10768 send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10770 let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10771 let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10773 nodes[1].node.claim_funds(our_payment_preimage);
10774 check_added_monitors!(nodes[1], 1);
10775 expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
10777 nodes[2].node.claim_funds(payment_preimage_2);
10778 check_added_monitors!(nodes[2], 1);
10779 expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
10781 for dest in &[1, 2] {
10782 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
10783 nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
10784 commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
10785 check_added_monitors(&nodes[0], 0);
10788 let (route, payment_hash_3, _, payment_secret_3) =
10789 get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
10790 let payment_id = PaymentId(payment_hash_3.0);
10791 nodes[1].node.send_payment_with_route(&route, payment_hash_3,
10792 RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
10793 check_added_monitors(&nodes[1], 1);
10795 let send_event = SendEvent::from_node(&nodes[1]);
10796 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
10797 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
10798 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
10800 if do_reload {
10801 let nodes_0_serialized = nodes[0].node.encode();
10802 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
10803 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
10804 reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
10806 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10807 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10809 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
10810 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 4);
	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
	} else { panic!(); }
	if let Event::PaymentSent { payment_preimage, .. } = events[1] {
		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
	} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }

	// After the events are processed, the ChannelMonitorUpdates will be released and, upon their
	// completion, we'll respond to nodes[1] with an RAA + CS.
	get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
	check_added_monitors(&nodes[0], 3);
}
#[test]
fn test_multi_post_event_actions() {
	do_test_multi_post_event_actions(true);
	do_test_multi_post_event_actions(false);
}
#[test]
fn test_batch_channel_open() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);
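	// `create_batch_channel_funding` is a test helper which opens both channels and hands the
	// shared funding transaction to nodes[0] (via `ChannelManager::batch_funding_transaction_generated`),
	// so a single transaction funds one channel to nodes[1] and one to nodes[2].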
	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
	// Go through the funding_created and funding_signed flow with node 2.
	nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
	check_added_monitors(&nodes[2], 1);
	expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before persisting all monitors has been
	// completed.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
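	// With the second monitor's persistence still marked `InProgress`, the funding transaction
	// must not be broadcast and no `ChannelPending` event should be surfaced yet.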
	// Complete the persistence of the monitor.
	nodes[0].chain_monitor.complete_sole_pending_chan_update(
		&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
	);
	let events = nodes[0].node.get_and_clear_pending_events();

	// The transaction should only have been broadcast now.
	let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
	assert_eq!(broadcasted_txs.len(), 1);
	assert_eq!(broadcasted_txs[0], tx);
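	// Both `ChannelPending` events (one per batched channel) are only surfaced once the whole
	// batch is signed and persisted.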
	assert_eq!(events.len(), 2);
	assert!(events.iter().any(|e| matches!(
		e,
		crate::events::Event::ChannelPending {
			ref counterparty_node_id,
			..
		} if counterparty_node_id == &nodes[1].node.get_our_node_id(),
	)));
	assert!(events.iter().any(|e| matches!(
		e,
		crate::events::Event::ChannelPending {
			ref counterparty_node_id,
			..
		} if counterparty_node_id == &nodes[2].node.get_our_node_id(),
	)));
}
#[test]
fn test_close_in_funding_batch() {
	// This test ensures that if one of the channels in the batch closes, the complete batch
	// will close.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);
	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
	// Force-close the channel for which we've completed the initial monitor.
	let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);

	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();

	// The monitor should become closed.
	check_added_monitors(&nodes[0], 1);
	{
		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
		assert_eq!(monitor_updates_1.len(), 1);
		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
	}
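	// (`CLOSED_CHANNEL_UPDATE_ID` above is the sentinel update ID used for a monitor's final,
	// channel-closing update, confirming the monitor saw the force-close.)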
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	match msg_events[0] {
		MessageSendEvent::HandleError { .. } => (),
		_ => panic!("Unexpected message."),
	}

	// We broadcast the commitment transaction as part of the force-close.
	{
		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(broadcasted_txs.len(), 1);
		assert!(broadcasted_txs[0].txid() != tx.txid());
		assert_eq!(broadcasted_txs[0].input.len(), 1);
		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
	}
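	// In the close events below, `discard_funding: true` means a `DiscardFunding` event was
	// generated, telling the user the never-broadcast batch funding transaction can be abandoned.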
	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_1),
			user_channel_id: Some(42),
			..Default::default()
		},
		ExpectedCloseEvent {
			channel_id: Some(channel_id_2),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_2),
			user_channel_id: Some(43),
			..Default::default()
		},
	]);

	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}
#[test]
fn test_batch_funding_close_after_funding_signed() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);
	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// Go through the funding_created and funding_signed flow with node 2.
	nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
	check_added_monitors(&nodes[2], 1);
	expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
	// Force-close the channel for which we've completed the initial monitor.
	let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
	check_added_monitors(&nodes[0], 2);
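	// Two monitor updates this time: force-closing one channel after `funding_signed` takes the
	// entire batch down with it, so both monitors receive their closing update.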
	{
		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
		assert_eq!(monitor_updates_1.len(), 1);
		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
		let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
		assert_eq!(monitor_updates_2.len(), 1);
		assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
	}
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	match msg_events[0] {
		MessageSendEvent::HandleError { .. } => (),
		_ => panic!("Unexpected message."),
	}

	// We broadcast the commitment transaction as part of the force-close.
	{
		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(broadcasted_txs.len(), 1);
		assert!(broadcasted_txs[0].txid() != tx.txid());
		assert_eq!(broadcasted_txs[0].input.len(), 1);
		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
	}
	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_1),
			user_channel_id: Some(42),
			..Default::default()
		},
		ExpectedCloseEvent {
			channel_id: Some(channel_id_2),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_2),
			user_channel_id: Some(43),
			..Default::default()
		},
	]);

	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}
fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
	// Tests that a node will forget the channel (when it only requires 1 confirmation) if the
	// funding and commitment transaction confirm in the same block.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let mut min_depth_1_block_cfg = test_default_channel_config();
	min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
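	// With `minimum_depth` of 1, a single confirmation makes the channel usable, which is what
	// lets the funding and commitment transactions confirm in the same block below.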
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
	let chan_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_tx.txid(), index: 0 });

	assert_eq!(nodes[0].node.list_channels().len(), 1);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	let (closing_node, other_node) = if confirm_remote_commitment {
		(&nodes[1], &nodes[0])
	} else {
		(&nodes[0], &nodes[1])
	};

	closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
	let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events.pop().unwrap() {
		MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, .. } => {},
		_ => panic!("Unexpected event"),
	}
	check_added_monitors(closing_node, 1);
	check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);
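	// Grab the commitment transaction the closing node just broadcast so we can confirm it in
	// the same block as the funding transaction.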
	let commitment_tx = {
		let mut txn = closing_node.tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 1);
		let commitment_tx = txn.pop().unwrap();
		check_spends!(commitment_tx, funding_tx);
		commitment_tx
	};

	mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
	mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);

	check_closed_broadcast(other_node, 1, true);
	check_added_monitors(other_node, 1);
	check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);

	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[1].node.list_channels().is_empty());
}
#[test]
fn test_funding_and_commitment_tx_confirm_same_block() {
	do_test_funding_and_commitment_tx_confirm_same_block(false);
	do_test_funding_and_commitment_tx_confirm_same_block(true);
}
#[test]
fn test_accept_inbound_channel_errors_queued() {
	// For manually accepted inbound channels, tests that a close error is correctly handled
	// and the channel fails for the initiator.
	let mut config0 = test_default_channel_config();
	let mut config1 = config0.clone();
	config1.channel_handshake_limits.their_to_self_delay = 1000;
	config1.manually_accept_inbound_channels = true;
	config0.channel_handshake_config.our_to_self_delay = 2000;
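	// nodes[0] will demand a 2000-block to_self_delay, while nodes[1] only tolerates up to 1000,
	// so nodes[1]'s manual accept below should fail and queue an error for the initiator.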
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
				Err(APIError::ChannelUnavailable { err: _ }) => (),
				_ => panic!(),
			}
		}
		_ => panic!("Unexpected event"),
	}
	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);